// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) Microsoft Corporation.
 *
 * Author:
 *   Jake Oshins <jakeo@microsoft.com>
 *
 * This driver acts as a paravirtual front-end for PCI Express root buses.
 * When a PCI Express function (either an entire device or an SR-IOV
 * Virtual Function) is being passed through to the VM, this driver exposes
 * a new bus to the guest VM. This is modeled as a root PCI bus because
 * no bridges are being exposed to the VM. In fact, with a "Generation 2"
 * VM within Hyper-V, there may seem to be no PCI bus at all in the VM
 * until a device has been exposed using this driver.
 *
 * Each root PCI bus has its own PCI domain, which is called "Segment" in
 * the PCI Firmware Specifications. Thus while each device passed through
 * to the VM using this front-end will appear at "device 0", the domain will
 * be unique. Typically, each bus will have one PCI function on it, though
 * this driver does support more than one.
 *
 * In order to map the interrupts from the device through to the guest VM,
 * this driver also implements an IRQ Domain, which handles interrupts (either
 * MSI or MSI-X) associated with the functions on the bus. As interrupts are
 * set up, torn down, or reaffined, this driver communicates with the
 * underlying hypervisor to adjust the mappings in the I/O MMU so that each
 * interrupt will be delivered to the correct virtual processor at the right
 * vector. This driver does not support level-triggered (line-based)
 * interrupts, and will report that the Interrupt Line register in the
 * function's configuration space is zero.
 *
 * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V
 * facilities. For instance, the configuration space of a function exposed
 * by Hyper-V is mapped into a single page of memory space, and the
 * read and write handlers for config space must be aware of this mechanism.
 * Similarly, device setup and teardown involves messages sent to and from
 * the PCI back-end driver in Hyper-V.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
#include <linux/irqdomain.h>
#include <linux/acpi.h>
#include <linux/sizes.h>
#include <asm/mshyperv.h>

/*
 * Protocol versions. The low word is the minor version, the high word the
 * major version.
 */

#define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor)))
#define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16)
#define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff)
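/* Example: PCI_MAKE_VERSION(1, 4) == 0x00010004, i.e. major 1, minor 4. */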

enum pci_protocol_version_t {
	PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1),	/* Win10 */
	PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2),	/* RS1 */
	PCI_PROTOCOL_VERSION_1_3 = PCI_MAKE_VERSION(1, 3),	/* Vibranium */
	PCI_PROTOCOL_VERSION_1_4 = PCI_MAKE_VERSION(1, 4),	/* WS2022 */
};

#define CPU_AFFINITY_ALL	-1ULL

/*
 * Supported protocol versions in the order of probing - highest goes
 * first.
 */
static enum pci_protocol_version_t pci_protocol_versions[] = {
	PCI_PROTOCOL_VERSION_1_4,
	PCI_PROTOCOL_VERSION_1_3,
	PCI_PROTOCOL_VERSION_1_2,
	PCI_PROTOCOL_VERSION_1_1,
};

#define PCI_CONFIG_MMIO_LENGTH	0x2000
#define CFG_PAGE_OFFSET 0x1000
#define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET)

#define MAX_SUPPORTED_MSI_MESSAGES 0x400

#define STATUS_REVISION_MISMATCH 0xC0000059

/* space for 32bit serial number as string */
#define SLOT_NAME_SIZE 11
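/* (A u32 is at most 10 decimal digits, "4294967295", plus a NUL byte.) */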

/*
 * Size of requestor for VMbus; the value is based on the observation
 * that having more than one request outstanding is 'rare', and so 64
 * should be generous in ensuring that we don't ever run out.
 */
#define HV_PCI_RQSTOR_SIZE 64

/*
 * Message Types
 */

enum pci_message_type {
	/*
	 * Version 1.1
	 */
	PCI_MESSAGE_BASE                = 0x42490000,
	PCI_BUS_RELATIONS               = PCI_MESSAGE_BASE + 0,
	PCI_QUERY_BUS_RELATIONS         = PCI_MESSAGE_BASE + 1,
	PCI_POWER_STATE_CHANGE          = PCI_MESSAGE_BASE + 4,
	PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5,
	PCI_QUERY_RESOURCE_RESOURCES    = PCI_MESSAGE_BASE + 6,
	PCI_BUS_D0ENTRY                 = PCI_MESSAGE_BASE + 7,
	PCI_BUS_D0EXIT                  = PCI_MESSAGE_BASE + 8,
	PCI_READ_BLOCK                  = PCI_MESSAGE_BASE + 9,
	PCI_WRITE_BLOCK                 = PCI_MESSAGE_BASE + 0xA,
	PCI_EJECT                       = PCI_MESSAGE_BASE + 0xB,
	PCI_QUERY_STOP                  = PCI_MESSAGE_BASE + 0xC,
	PCI_REENABLE                    = PCI_MESSAGE_BASE + 0xD,
	PCI_QUERY_STOP_FAILED           = PCI_MESSAGE_BASE + 0xE,
	PCI_EJECTION_COMPLETE           = PCI_MESSAGE_BASE + 0xF,
	PCI_RESOURCES_ASSIGNED          = PCI_MESSAGE_BASE + 0x10,
	PCI_RESOURCES_RELEASED          = PCI_MESSAGE_BASE + 0x11,
	PCI_INVALIDATE_BLOCK            = PCI_MESSAGE_BASE + 0x12,
	PCI_QUERY_PROTOCOL_VERSION      = PCI_MESSAGE_BASE + 0x13,
	PCI_CREATE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x14,
	PCI_DELETE_INTERRUPT_MESSAGE    = PCI_MESSAGE_BASE + 0x15,
	PCI_RESOURCES_ASSIGNED2         = PCI_MESSAGE_BASE + 0x16,
	PCI_CREATE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x17,
	PCI_DELETE_INTERRUPT_MESSAGE2   = PCI_MESSAGE_BASE + 0x18, /* unused */
	PCI_BUS_RELATIONS2              = PCI_MESSAGE_BASE + 0x19,
	PCI_RESOURCES_ASSIGNED3         = PCI_MESSAGE_BASE + 0x1A,
	PCI_CREATE_INTERRUPT_MESSAGE3   = PCI_MESSAGE_BASE + 0x1B,
	PCI_MESSAGE_MAXIMUM
};

/*
 * Structures defining the virtual PCI Express protocol.
 */

union pci_version {
	struct {
		u16 minor_version;
		u16 major_version;
	} parts;
	u32 version;
} __packed;

/*
 * Function numbers are 8-bits wide on Express, as interpreted through ARI,
 * which is all this driver does. This representation is the one used in
 * Windows, which is what is expected when sending this back and forth with
 * the Hyper-V parent partition.
 */
union win_slot_encoding {
	struct {
		u32	dev:5;
		u32	func:3;
		u32	reserved:24;
	} bits;
	u32 slot;
} __packed;
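/*
 * Example: Linux devfn 0x0b (slot 1, function 3) encodes as wslot 0x61,
 * i.e. dev = 1 in bits 0-4 and func = 3 in bits 5-7.
 */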
166
167 /*
168 * Pretty much as defined in the PCI Specifications.
169 */
170 struct pci_function_description {
171 u16 v_id; /* vendor ID */
172 u16 d_id; /* device ID */
173 u8 rev;
174 u8 prog_intf;
175 u8 subclass;
176 u8 base_class;
177 u32 subsystem_id;
178 union win_slot_encoding win_slot;
179 u32 ser; /* serial number */
180 } __packed;
181
182 enum pci_device_description_flags {
183 HV_PCI_DEVICE_FLAG_NONE = 0x0,
184 HV_PCI_DEVICE_FLAG_NUMA_AFFINITY = 0x1,
185 };
186
187 struct pci_function_description2 {
188 u16 v_id; /* vendor ID */
189 u16 d_id; /* device ID */
190 u8 rev;
191 u8 prog_intf;
192 u8 subclass;
193 u8 base_class;
194 u32 subsystem_id;
195 union win_slot_encoding win_slot;
196 u32 ser; /* serial number */
197 u32 flags;
198 u16 virtual_numa_node;
199 u16 reserved;
200 } __packed;
201
202 /**
203 * struct hv_msi_desc
204 * @vector: IDT entry
205 * @delivery_mode: As defined in Intel's Programmer's
206 * Reference Manual, Volume 3, Chapter 8.
207 * @vector_count: Number of contiguous entries in the
208 * Interrupt Descriptor Table that are
209 * occupied by this Message-Signaled
210 * Interrupt. For "MSI", as first defined
211 * in PCI 2.2, this can be between 1 and
212 * 32. For "MSI-X," as first defined in PCI
213 * 3.0, this must be 1, as each MSI-X table
214 * entry would have its own descriptor.
215 * @reserved: Empty space
216 * @cpu_mask: All the target virtual processors.
217 */
218 struct hv_msi_desc {
219 u8 vector;
220 u8 delivery_mode;
221 u16 vector_count;
222 u32 reserved;
223 u64 cpu_mask;
224 } __packed;
225
226 /**
227 * struct hv_msi_desc2 - 1.2 version of hv_msi_desc
228 * @vector: IDT entry
229 * @delivery_mode: As defined in Intel's Programmer's
230 * Reference Manual, Volume 3, Chapter 8.
231 * @vector_count: Number of contiguous entries in the
232 * Interrupt Descriptor Table that are
233 * occupied by this Message-Signaled
234 * Interrupt. For "MSI", as first defined
235 * in PCI 2.2, this can be between 1 and
236 * 32. For "MSI-X," as first defined in PCI
237 * 3.0, this must be 1, as each MSI-X table
238 * entry would have its own descriptor.
239 * @processor_count: number of bits enabled in array.
240 * @processor_array: All the target virtual processors.
241 */
242 struct hv_msi_desc2 {
243 u8 vector;
244 u8 delivery_mode;
245 u16 vector_count;
246 u16 processor_count;
247 u16 processor_array[32];
248 } __packed;
249
250 /*
251 * struct hv_msi_desc3 - 1.3 version of hv_msi_desc
252 * Everything is the same as in 'hv_msi_desc2' except that the size of the
253 * 'vector' field is larger to support bigger vector values. For ex: LPI
254 * vectors on ARM.
255 */
256 struct hv_msi_desc3 {
257 u32 vector;
258 u8 delivery_mode;
259 u8 reserved;
260 u16 vector_count;
261 u16 processor_count;
262 u16 processor_array[32];
263 } __packed;
264
265 /**
266 * struct tran_int_desc
267 * @reserved: unused, padding
268 * @vector_count: same as in hv_msi_desc
269 * @data: This is the "data payload" value that is
270 * written by the device when it generates
271 * a message-signaled interrupt, either MSI
272 * or MSI-X.
273 * @address: This is the address to which the data
274 * payload is written on interrupt
275 * generation.
276 */
277 struct tran_int_desc {
278 u16 reserved;
279 u16 vector_count;
280 u32 data;
281 u64 address;
282 } __packed;
283
284 /*
285 * A generic message format for virtual PCI.
286 * Specific message formats are defined later in the file.
287 */
288
289 struct pci_message {
290 u32 type;
291 } __packed;
292
293 struct pci_child_message {
294 struct pci_message message_type;
295 union win_slot_encoding wslot;
296 } __packed;
297
298 struct pci_incoming_message {
299 struct vmpacket_descriptor hdr;
300 struct pci_message message_type;
301 } __packed;
302
303 struct pci_response {
304 struct vmpacket_descriptor hdr;
305 s32 status; /* negative values are failures */
306 } __packed;
307
308 struct pci_packet {
309 void (*completion_func)(void *context, struct pci_response *resp,
310 int resp_packet_size);
311 void *compl_ctxt;
312
313 struct pci_message message[];
314 };
315
316 /*
317 * Specific message types supporting the PCI protocol.
318 */
319
320 /*
321 * Version negotiation message. Sent from the guest to the host.
322 * The guest is free to try different versions until the host
323 * accepts the version.
324 *
325 * pci_version: The protocol version requested.
326 * is_last_attempt: If TRUE, this is the last version guest will request.
327 * reservedz: Reserved field, set to zero.
328 */
329
330 struct pci_version_request {
331 struct pci_message message_type;
332 u32 protocol_version;
333 } __packed;
334
335 /*
336 * Bus D0 Entry. This is sent from the guest to the host when the virtual
337 * bus (PCI Express port) is ready for action.
338 */
339
340 struct pci_bus_d0_entry {
341 struct pci_message message_type;
342 u32 reserved;
343 u64 mmio_base;
344 } __packed;

struct pci_bus_relations {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description func[];
} __packed;

struct pci_bus_relations2 {
	struct pci_incoming_message incoming;
	u32 device_count;
	struct pci_function_description2 func[];
} __packed;

struct pci_q_res_req_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	u32 probed_bar[PCI_STD_NUM_BARS];
} __packed;

struct pci_set_power {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 power_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_set_power_response {
	struct vmpacket_descriptor hdr;
	s32 status;	/* negative values are failures */
	union win_slot_encoding wslot;
	u32 resultant_state;	/* In Windows terms */
	u32 reserved;
} __packed;

struct pci_resources_assigned {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptors;
	u32 reserved[4];
} __packed;

struct pci_resources_assigned2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u8 memory_range[0x14][6];	/* not used here */
	u32 msi_descriptor_count;
	u8 reserved[70];
} __packed;

struct pci_create_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc int_desc;
} __packed;

struct pci_create_int_response {
	struct pci_response response;
	u32 reserved;
	struct tran_int_desc int_desc;
} __packed;

struct pci_create_interrupt2 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc2 int_desc;
} __packed;

struct pci_create_interrupt3 {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct hv_msi_desc3 int_desc;
} __packed;

struct pci_delete_interrupt {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	struct tran_int_desc int_desc;
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and bytes_requested.
 */
struct pci_read_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 bytes_requested;
} __packed;

struct pci_read_block_response {
	struct vmpacket_descriptor hdr;
	u32 status;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

/*
 * Note: the VM must pass a valid block id, wslot and byte_count.
 */
struct pci_write_block {
	struct pci_message message_type;
	u32 block_id;
	union win_slot_encoding wslot;
	u32 byte_count;
	u8 bytes[HV_CONFIG_BLOCK_SIZE_MAX];
} __packed;

struct pci_dev_inval_block {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
	u64 block_mask;
} __packed;

struct pci_dev_incoming {
	struct pci_incoming_message incoming;
	union win_slot_encoding wslot;
} __packed;

struct pci_eject_response {
	struct pci_message message_type;
	union win_slot_encoding wslot;
	u32 status;
} __packed;

static int pci_ring_size = VMBUS_RING_SIZE(SZ_16K);

/*
 * Driver specific state.
 */

enum hv_pcibus_state {
	hv_pcibus_init = 0,
	hv_pcibus_probed,
	hv_pcibus_installed,
	hv_pcibus_removing,
	hv_pcibus_maximum
};

struct hv_pcibus_device {
#ifdef CONFIG_X86
	struct pci_sysdata sysdata;
#elif defined(CONFIG_ARM64)
	struct pci_config_window sysdata;
#endif
	struct pci_host_bridge *bridge;
	struct fwnode_handle *fwnode;
	/* Protocol version negotiated with the host */
	enum pci_protocol_version_t protocol_version;

	struct mutex state_lock;
	enum hv_pcibus_state state;

	struct hv_device *hdev;
	resource_size_t low_mmio_space;
	resource_size_t high_mmio_space;
	struct resource *mem_config;
	struct resource *low_mmio_res;
	struct resource *high_mmio_res;
	struct completion *survey_event;
	struct pci_bus *pci_bus;
	spinlock_t config_lock;		/* Avoid two threads writing index page */
	spinlock_t device_list_lock;	/* Protect lists below */
	void __iomem *cfg_addr;

	struct list_head children;
	struct list_head dr_list;

	struct msi_domain_info msi_info;
	struct irq_domain *irq_domain;

	struct workqueue_struct *wq;

	/* Highest slot of child device with resources allocated */
	int wslot_res_allocated;
	bool use_calls;		/* Use hypercalls to access mmio cfg space */
};

/*
 * Tracks "Device Relations" messages from the host, which must be both
 * processed in order and deferred so that they don't run in the context
 * of the incoming packet callback.
 */
struct hv_dr_work {
	struct work_struct wrk;
	struct hv_pcibus_device *bus;
};

struct hv_pcidev_description {
	u16	v_id;	/* vendor ID */
	u16	d_id;	/* device ID */
	u8	rev;
	u8	prog_intf;
	u8	subclass;
	u8	base_class;
	u32	subsystem_id;
	union	win_slot_encoding win_slot;
	u32	ser;	/* serial number */
	u32	flags;
	u16	virtual_numa_node;
};

struct hv_dr_state {
	struct list_head list_entry;
	u32 device_count;
	struct hv_pcidev_description func[];
};

struct hv_pci_dev {
	/* List protected by pci_rescan_remove_lock */
	struct list_head list_entry;
	refcount_t refs;
	struct pci_slot *pci_slot;
	struct hv_pcidev_description desc;
	bool reported_missing;
	struct hv_pcibus_device *hbus;
	struct work_struct wrk;

	void (*block_invalidate)(void *context, u64 block_mask);
	void *invalidate_context;

	/*
	 * What would be observed if one wrote 0xFFFFFFFF to a BAR and then
	 * read it back, for each of the BAR offsets within config space.
	 */
	u32 probed_bar[PCI_STD_NUM_BARS];
};

struct hv_pci_compl {
	struct completion host_event;
	s32 completion_status;
};

static void hv_pci_onchannelcallback(void *context);

#ifdef CONFIG_X86
#define DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
#define FLOW_HANDLER	handle_edge_irq
#define FLOW_NAME	"edge"

static int hv_pci_irqchip_init(void)
{
	return 0;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return x86_vector_domain;
}

static unsigned int hv_msi_get_int_vector(struct irq_data *data)
{
	struct irq_cfg *cfg = irqd_cfg(data);

	return cfg->vector;
}

#define hv_msi_prepare	pci_msi_prepare

/**
 * hv_arch_irq_unmask() - "Unmask" the IRQ by setting its current
 * affinity.
 * @data:	Describes the IRQ
 *
 * Build a new destination for the MSI and make a hypercall to
 * update the Interrupt Redirection Table. "Device Logical ID"
 * is built out of this PCI bus's instance GUID and the function
 * number of the device.
 */
static void hv_arch_irq_unmask(struct irq_data *data)
{
	struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
	struct hv_retarget_device_interrupt *params;
	struct tran_int_desc *int_desc;
	struct hv_pcibus_device *hbus;
	const struct cpumask *dest;
	cpumask_var_t tmp;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	unsigned long flags;
	u32 var_size = 0;
	int cpu, nr_bank;
	u64 res;

	dest = irq_data_get_effective_affinity_mask(data);
	pdev = msi_desc_to_pci_dev(msi_desc);
	pbus = pdev->bus;
	hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
	int_desc = data->chip_data;
	if (!int_desc) {
		dev_warn(&hbus->hdev->device, "%s() can not unmask irq %u\n",
			 __func__, data->irq);
		return;
	}

	local_irq_save(flags);

	params = *this_cpu_ptr(hyperv_pcpu_input_arg);
	memset(params, 0, sizeof(*params));
	params->partition_id = HV_PARTITION_ID_SELF;
	params->int_entry.source = HV_INTERRUPT_SOURCE_MSI;
	params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff;
	params->int_entry.msi_entry.data.as_uint32 = int_desc->data;
	params->device_id = (hbus->hdev->dev_instance.b[5] << 24) |
			    (hbus->hdev->dev_instance.b[4] << 16) |
			    (hbus->hdev->dev_instance.b[7] << 8) |
			    (hbus->hdev->dev_instance.b[6] & 0xf8) |
			    PCI_FUNC(pdev->devfn);
	params->int_target.vector = hv_msi_get_int_vector(data);

	/*
	 * Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
	 * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a
	 * spurious interrupt storm. Not doing so does not seem to have a
	 * negative effect (yet?).
	 */

	if (hbus->protocol_version >= PCI_PROTOCOL_VERSION_1_2) {
		/*
		 * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the
		 * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides
		 * with >64 VP support.
		 * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
		 * is not sufficient for this hypercall.
		 */
		params->int_target.flags |=
			HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET;

		if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) {
			res = 1;
			goto out;
		}

		cpumask_and(tmp, dest, cpu_online_mask);
		nr_bank = cpumask_to_vpset(&params->int_target.vp_set, tmp);
		free_cpumask_var(tmp);

		if (nr_bank <= 0) {
			res = 1;
			goto out;
		}

		/*
		 * var-sized hypercall, var-size starts after vp_mask (thus
		 * vp_set.format does not count, but vp_set.valid_bank_mask
		 * does).
		 */
		var_size = 1 + nr_bank;
	} else {
		for_each_cpu_and(cpu, dest, cpu_online_mask) {
			params->int_target.vp_mask |=
				(1ULL << hv_cpu_number_to_vp_number(cpu));
		}
	}

	res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17),
			      params, NULL);

out:
	local_irq_restore(flags);

	/*
	 * During hibernation, when a CPU is offlined, the kernel tries
	 * to move the interrupt to the remaining CPUs that haven't
	 * been offlined yet. In this case, the hv_do_hypercall() above
	 * always fails since the vmbus channel has been closed:
	 * refer to cpu_disable_common() -> fixup_irqs() ->
	 * irq_migrate_all_off_this_cpu() -> migrate_one_irq().
	 *
	 * Suppress the error message for hibernation because the failure
	 * during hibernation does not matter (at this time all the devices
	 * have been frozen). Note: the correct affinity info is still updated
	 * into the irqdata data structure in migrate_one_irq() ->
	 * irq_do_set_affinity(), so later when the VM resumes,
	 * hv_pci_restore_msi_state() is able to correctly restore the
	 * interrupt with the correct affinity.
	 */
	if (!hv_result_success(res) && hbus->state != hv_pcibus_removing)
		dev_err(&hbus->hdev->device,
			"%s() failed: %#llx", __func__, res);
}
#elif defined(CONFIG_ARM64)
/*
 * SPI vectors to use for vPCI; the arch SPI range is [32, 1019], but we
 * leave a bit of room at the start to allow for SPIs to be specified
 * through ACPI, and we start at a power of two to satisfy the power-of-2
 * multi-MSI requirement.
 */
#define HV_PCI_MSI_SPI_START	64
#define HV_PCI_MSI_SPI_NR	(1020 - HV_PCI_MSI_SPI_START)
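/* That leaves SPIs 64..1019, i.e. HV_PCI_MSI_SPI_NR == 956 vectors. */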
#define DELIVERY_MODE	0
#define FLOW_HANDLER	NULL
#define FLOW_NAME	NULL
#define hv_msi_prepare	NULL

struct hv_pci_chip_data {
	DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
	struct mutex	map_lock;
};

/* Hyper-V vPCI MSI GIC IRQ domain */
static struct irq_domain *hv_msi_gic_irq_domain;

/* Hyper-V PCI MSI IRQ chip */
static struct irq_chip hv_arm64_msi_irq_chip = {
	.name = "MSI",
	.irq_set_affinity = irq_chip_set_affinity_parent,
	.irq_eoi = irq_chip_eoi_parent,
	.irq_mask = irq_chip_mask_parent,
	.irq_unmask = irq_chip_unmask_parent
};

static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
{
	return irqd->parent_data->hwirq;
}

/*
 * @nr_bm_irqs:		Indicates the number of IRQs that were allocated from
 *			the bitmap.
 * @nr_dom_irqs:	Indicates the number of IRQs that were allocated from
 *			the parent domain.
 */
static void hv_pci_vec_irq_free(struct irq_domain *domain,
				unsigned int virq,
				unsigned int nr_bm_irqs,
				unsigned int nr_dom_irqs)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	int first = d->hwirq - HV_PCI_MSI_SPI_START;
	int i;

	mutex_lock(&chip_data->map_lock);
	bitmap_release_region(chip_data->spi_map,
			      first,
			      get_count_order(nr_bm_irqs));
	mutex_unlock(&chip_data->map_lock);
	for (i = 0; i < nr_dom_irqs; i++) {
		if (i)
			d = irq_domain_get_irq_data(domain, virq + i);
		irq_domain_reset_irq_data(d);
	}

	irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
}

static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
				       unsigned int virq,
				       unsigned int nr_irqs)
{
	hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
}

static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
				       unsigned int nr_irqs,
				       irq_hw_number_t *hwirq)
{
	struct hv_pci_chip_data *chip_data = domain->host_data;
	int index;

	/* Find and allocate region from the SPI bitmap */
	mutex_lock(&chip_data->map_lock);
	index = bitmap_find_free_region(chip_data->spi_map,
					HV_PCI_MSI_SPI_NR,
					get_count_order(nr_irqs));
	mutex_unlock(&chip_data->map_lock);
	if (index < 0)
		return -ENOSPC;

	*hwirq = index + HV_PCI_MSI_SPI_START;

	return 0;
}

static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
					   unsigned int virq,
					   irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int ret;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 2;
	fwspec.param[0] = hwirq;
	fwspec.param[1] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret)
		return ret;

	/*
	 * Since the interrupt specifier is not coming from ACPI or DT, the
	 * trigger type will need to be set explicitly. Otherwise, it will be
	 * set to whatever is in the GIC configuration.
	 */
	d = irq_domain_get_irq_data(domain->parent, virq);

	return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
}

static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
				       unsigned int virq, unsigned int nr_irqs,
				       void *args)
{
	irq_hw_number_t hwirq;
	unsigned int i;
	int ret;

	ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
						      hwirq + i);
		if (ret) {
			hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
			return ret;
		}

		irq_domain_set_hwirq_and_chip(domain, virq + i,
					      hwirq + i,
					      &hv_arm64_msi_irq_chip,
					      domain->host_data);
		pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
	}

	return 0;
}

/*
 * Pick the first CPU as the IRQ affinity that can be temporarily used for
 * composing the MSI from the hypervisor. GIC will eventually set the right
 * affinity for the IRQ and the 'unmask' will retarget the interrupt to that
 * CPU.
 */
static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
					  struct irq_data *irqd, bool reserve)
{
	int cpu = cpumask_first(cpu_present_mask);

	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));

	return 0;
}

static const struct irq_domain_ops hv_pci_domain_ops = {
	.alloc	= hv_pci_vec_irq_domain_alloc,
	.free	= hv_pci_vec_irq_domain_free,
	.activate = hv_pci_vec_irq_domain_activate,
};

static int hv_pci_irqchip_init(void)
{
	static struct hv_pci_chip_data *chip_data;
	struct fwnode_handle *fn = NULL;
	int ret = -ENOMEM;

	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
	if (!chip_data)
		return ret;

	mutex_init(&chip_data->map_lock);
	fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
	if (!fn)
		goto free_chip;

	/*
	 * Once enabled, the IRQ domain should not be removed, since there is
	 * no way to ensure that all the corresponding devices are also gone
	 * and that no interrupts will be generated.
	 */
	hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
							  fn, &hv_pci_domain_ops,
							  chip_data);

	if (!hv_msi_gic_irq_domain) {
		pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
		goto free_chip;
	}

	return 0;

free_chip:
	kfree(chip_data);
	if (fn)
		irq_domain_free_fwnode(fn);

	return ret;
}

static struct irq_domain *hv_pci_get_root_domain(void)
{
	return hv_msi_gic_irq_domain;
}

/*
 * SPIs are used for interrupts of PCI devices and SPIs are managed via GICD
 * registers which Hyper-V already supports, so no hypercall is needed.
 */
static void hv_arch_irq_unmask(struct irq_data *data) { }
#endif /* CONFIG_ARM64 */

/**
 * hv_pci_generic_compl() - Invoked for a completion packet
 * @context:		Set up by the sender of the packet.
 * @resp:		The response packet
 * @resp_packet_size:	Size in bytes of the packet
 *
 * This function is used to trigger an event and report status
 * for any message for which the completion packet contains a
 * status and nothing else.
 */
static void hv_pci_generic_compl(void *context, struct pci_response *resp,
				 int resp_packet_size)
{
	struct hv_pci_compl *comp_pkt = context;

	comp_pkt->completion_status = resp->status;
	complete(&comp_pkt->host_event);
}

static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
					     u32 wslot);

static void get_pcichild(struct hv_pci_dev *hpdev)
{
	refcount_inc(&hpdev->refs);
}

static void put_pcichild(struct hv_pci_dev *hpdev)
{
	if (refcount_dec_and_test(&hpdev->refs))
		kfree(hpdev);
}

/*
 * There is no good way to get notified from vmbus_onoffer_rescind(),
 * so let's use polling here, since this is not a hot path.
 */
static int wait_for_response(struct hv_device *hdev,
			     struct completion *comp)
{
	while (true) {
		if (hdev->channel->rescind) {
			dev_warn_once(&hdev->device, "The device is gone.\n");
			return -ENODEV;
		}

		if (wait_for_completion_timeout(comp, HZ / 10))
			break;
	}

	return 0;
}

/**
 * devfn_to_wslot() - Convert from Linux PCI slot to Windows
 * @devfn: The Linux representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Windows representation
 */
static u32 devfn_to_wslot(int devfn)
{
	union win_slot_encoding wslot;

	wslot.slot = 0;
	wslot.bits.dev = PCI_SLOT(devfn);
	wslot.bits.func = PCI_FUNC(devfn);

	return wslot.slot;
}

/**
 * wslot_to_devfn() - Convert from Windows PCI slot to Linux
 * @wslot: The Windows representation of PCI slot
 *
 * Windows uses a slightly different representation of PCI slot.
 *
 * Return: The Linux representation
 */
static int wslot_to_devfn(u32 wslot)
{
	union win_slot_encoding slot_no;

	slot_no.slot = wslot;
	return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func);
}

static void hv_pci_read_mmio(struct device *dev, phys_addr_t gpa, int size, u32 *val)
{
	struct hv_mmio_read_input *in;
	struct hv_mmio_read_output *out;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument page. Use it for
	 * both input and output.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	out = *this_cpu_ptr(hyperv_pcpu_input_arg) + sizeof(*in);
	in->gpa = gpa;
	in->size = size;

	ret = hv_do_hypercall(HVCALL_MMIO_READ, in, out);
	if (hv_result_success(ret)) {
		switch (size) {
		case 1:
			*val = *(u8 *)(out->data);
			break;
		case 2:
			*val = *(u16 *)(out->data);
			break;
		default:
			*val = *(u32 *)(out->data);
			break;
		}
	} else
		dev_err(dev, "MMIO read hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

static void hv_pci_write_mmio(struct device *dev, phys_addr_t gpa, int size, u32 val)
{
	struct hv_mmio_write_input *in;
	u64 ret;

	/*
	 * Must be called with interrupts disabled so it is safe
	 * to use the per-cpu input argument memory.
	 */
	in = *this_cpu_ptr(hyperv_pcpu_input_arg);
	in->gpa = gpa;
	in->size = size;
	switch (size) {
	case 1:
		*(u8 *)(in->data) = val;
		break;
	case 2:
		*(u16 *)(in->data) = val;
		break;
	default:
		*(u32 *)(in->data) = val;
		break;
	}

	ret = hv_do_hypercall(HVCALL_MMIO_WRITE, in, NULL);
	if (!hv_result_success(ret))
		dev_err(dev, "MMIO write hypercall error %llx addr %llx size %d\n",
			ret, gpa, size);
}

/*
 * PCI Configuration Space for these root PCI buses is implemented as a pair
 * of pages in memory-mapped I/O space. Writing to the first page chooses
 * the PCI function being written or read. Once the first page has been
 * written to, the following page maps in the entire configuration space of
 * the function.
 */
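
/*
 * For example, reading the Vendor ID of the function in 'wslot' boils down
 * to the following sketch (see hv_pcifront_get_vendor_id() below):
 *
 *	writel(wslot, cfg_addr);   - first page: choose the function
 *	mb();                      - make the choice visible before reading
 *	id = readw(cfg_addr + CFG_PAGE_OFFSET + PCI_VENDOR_ID);
 */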

/**
 * _hv_pcifront_read_config() - Internal PCI config read
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	Pointer to the buffer receiving the data
 */
static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
				     int size, u32 *val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	/*
	 * If the attempt is to read the IDs or the ROM BAR, simulate that.
	 */
	if (where + size <= PCI_COMMAND) {
		memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size);
	} else if (where >= PCI_CLASS_REVISION && where + size <=
		   PCI_CACHE_LINE_SIZE) {
		memcpy(val, ((u8 *)&hpdev->desc.rev) + where -
		       PCI_CLASS_REVISION, size);
	} else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <=
		   PCI_ROM_ADDRESS) {
		memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where -
		       PCI_SUBSYSTEM_VENDOR_ID, size);
	} else if (where >= PCI_ROM_ADDRESS && where + size <=
		   PCI_CAPABILITY_LIST) {
		/* ROM BARs are unimplemented */
		*val = 0;
	} else if ((where >= PCI_INTERRUPT_LINE && where + size <= PCI_INTERRUPT_PIN) ||
		   (where >= PCI_INTERRUPT_PIN && where + size <= PCI_MIN_GNT)) {
		/*
		 * Interrupt Line and Interrupt Pin are hard-wired to zero
		 * because this front-end only supports message-signaled
		 * interrupts.
		 */
		*val = 0;
	} else if (where + size <= CFG_PAGE_SIZE) {

		spin_lock_irqsave(&hbus->config_lock, flags);
		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_read_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to be read. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before reading. */
			mb();
			/* Read from that function's config space. */
			switch (size) {
			case 1:
				*val = readb(addr);
				break;
			case 2:
				*val = readw(addr);
				break;
			default:
				*val = readl(addr);
				break;
			}
			/*
			 * Make sure the read was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to read beyond a function's config space.\n");
	}
}

static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	u32 val;
	u16 ret;
	unsigned long flags;

	spin_lock_irqsave(&hbus->config_lock, flags);

	if (hbus->use_calls) {
		phys_addr_t addr = hbus->mem_config->start +
				   CFG_PAGE_OFFSET + PCI_VENDOR_ID;

		hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
				  hpdev->desc.win_slot.slot);
		hv_pci_read_mmio(dev, addr, 2, &val);
		ret = val;  /* Truncates to 16 bits */
	} else {
		void __iomem *addr = hbus->cfg_addr + CFG_PAGE_OFFSET +
				     PCI_VENDOR_ID;
		/* Choose the function to be read. (See comment above) */
		writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
		/* Make sure the function was chosen before we start reading. */
		mb();
		/* Read from that function's config space. */
		ret = readw(addr);
		/*
		 * mb() is not required here, because the
		 * spin_unlock_irqrestore() is a barrier.
		 */
	}

	spin_unlock_irqrestore(&hbus->config_lock, flags);

	return ret;
}

/**
 * _hv_pcifront_write_config() - Internal PCI config write
 * @hpdev:	The PCI driver's representation of the device
 * @where:	Offset within config space
 * @size:	Size of the transfer
 * @val:	The data being transferred
 */
static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where,
				      int size, u32 val)
{
	struct hv_pcibus_device *hbus = hpdev->hbus;
	struct device *dev = &hbus->hdev->device;
	int offset = where + CFG_PAGE_OFFSET;
	unsigned long flags;

	if (where >= PCI_SUBSYSTEM_VENDOR_ID &&
	    where + size <= PCI_CAPABILITY_LIST) {
		/* SSIDs and ROM BARs are read-only */
	} else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) {
		spin_lock_irqsave(&hbus->config_lock, flags);

		if (hbus->use_calls) {
			phys_addr_t addr = hbus->mem_config->start + offset;

			hv_pci_write_mmio(dev, hbus->mem_config->start, 4,
					  hpdev->desc.win_slot.slot);
			hv_pci_write_mmio(dev, addr, size, val);
		} else {
			void __iomem *addr = hbus->cfg_addr + offset;

			/* Choose the function to write. (See comment above) */
			writel(hpdev->desc.win_slot.slot, hbus->cfg_addr);
			/* Make sure the function was chosen before writing. */
			wmb();
			/* Write to that function's config space. */
			switch (size) {
			case 1:
				writeb(val, addr);
				break;
			case 2:
				writew(val, addr);
				break;
			default:
				writel(val, addr);
				break;
			}
			/*
			 * Make sure the write was done before we release the
			 * spinlock allowing consecutive reads/writes.
			 */
			mb();
		}
		spin_unlock_irqrestore(&hbus->config_lock, flags);
	} else {
		dev_err(dev, "Attempt to write beyond a function's config space.\n");
	}
}

/**
 * hv_pcifront_read_config() - Read configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be read
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn,
				   int where, int size, u32 *val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_read_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/**
 * hv_pcifront_write_config() - Write configuration space
 * @bus: PCI Bus structure
 * @devfn: Device/function
 * @where: Offset from base
 * @size: Byte/word/dword
 * @val: Value to be written to device
 *
 * Return: PCIBIOS_SUCCESSFUL on success
 *	   PCIBIOS_DEVICE_NOT_FOUND on failure
 */
static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn,
				    int where, int size, u32 val)
{
	struct hv_pcibus_device *hbus =
		container_of(bus->sysdata, struct hv_pcibus_device, sysdata);
	struct hv_pci_dev *hpdev;

	hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn));
	if (!hpdev)
		return PCIBIOS_DEVICE_NOT_FOUND;

	_hv_pcifront_write_config(hpdev, where, size, val);

	put_pcichild(hpdev);
	return PCIBIOS_SUCCESSFUL;
}

/* PCIe operations */
static struct pci_ops hv_pcifront_ops = {
	.read  = hv_pcifront_read_config,
	.write = hv_pcifront_write_config,
};

/*
 * Paravirtual backchannel
 *
 * Hyper-V SR-IOV provides a backchannel mechanism in software for
 * communication between a VF driver and a PF driver. These
 * "configuration blocks" are similar in concept to PCI configuration space,
 * but instead of doing reads and writes in 32-bit chunks through a very slow
 * path, packets of up to 128 bytes can be sent or received asynchronously.
 *
 * Nearly every SR-IOV device contains just such a communications channel in
 * hardware, so using this one in software is usually optional. Using the
 * software channel, however, allows driver implementers to leverage software
 * tools that fuzz the communications channel looking for vulnerabilities.
 *
 * The usage model for these packets puts the responsibility for reading or
 * writing on the VF driver. The VF driver sends a read or a write packet,
 * indicating which "block" is being referred to by number.
 *
 * If the PF driver wishes to initiate communication, it can "invalidate" one
 * or more of the first 64 blocks. This invalidation is delivered via a
 * callback supplied to this driver by the VF driver.
 *
 * No protocol is implied, except that supplied by the PF and VF drivers.
 */
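
/*
 * A minimal usage sketch for a VF driver, assuming the hvpci_block_ops
 * table exported through include/linux/hyperv.h (wired up to the
 * functions below):
 *
 *	u8 buf[HV_CONFIG_BLOCK_SIZE_MAX];
 *	unsigned int bytes_returned;
 *	int ret;
 *
 *	ret = hvpci_block_ops.read_block(pdev, buf, sizeof(buf),
 *					 block_id, &bytes_returned);
 */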
1370
1371 struct hv_read_config_compl {
1372 struct hv_pci_compl comp_pkt;
1373 void *buf;
1374 unsigned int len;
1375 unsigned int bytes_returned;
1376 };
1377
1378 /**
1379 * hv_pci_read_config_compl() - Invoked when a response packet
1380 * for a read config block operation arrives.
1381 * @context: Identifies the read config operation
1382 * @resp: The response packet itself
1383 * @resp_packet_size: Size in bytes of the response packet
1384 */
hv_pci_read_config_compl(void * context,struct pci_response * resp,int resp_packet_size)1385 static void hv_pci_read_config_compl(void *context, struct pci_response *resp,
1386 int resp_packet_size)
1387 {
1388 struct hv_read_config_compl *comp = context;
1389 struct pci_read_block_response *read_resp =
1390 (struct pci_read_block_response *)resp;
1391 unsigned int data_len, hdr_len;
1392
1393 hdr_len = offsetof(struct pci_read_block_response, bytes);
1394 if (resp_packet_size < hdr_len) {
1395 comp->comp_pkt.completion_status = -1;
1396 goto out;
1397 }
1398
1399 data_len = resp_packet_size - hdr_len;
1400 if (data_len > 0 && read_resp->status == 0) {
1401 comp->bytes_returned = min(comp->len, data_len);
1402 memcpy(comp->buf, read_resp->bytes, comp->bytes_returned);
1403 } else {
1404 comp->bytes_returned = 0;
1405 }
1406
1407 comp->comp_pkt.completion_status = read_resp->status;
1408 out:
1409 complete(&comp->comp_pkt.host_event);
1410 }
1411
1412 /**
1413 * hv_read_config_block() - Sends a read config block request to
1414 * the back-end driver running in the Hyper-V parent partition.
1415 * @pdev: The PCI driver's representation for this device.
1416 * @buf: Buffer into which the config block will be copied.
1417 * @len: Size in bytes of buf.
1418 * @block_id: Identifies the config block which has been requested.
1419 * @bytes_returned: Size which came back from the back-end driver.
1420 *
1421 * Return: 0 on success, -errno on failure
1422 */
hv_read_config_block(struct pci_dev * pdev,void * buf,unsigned int len,unsigned int block_id,unsigned int * bytes_returned)1423 static int hv_read_config_block(struct pci_dev *pdev, void *buf,
1424 unsigned int len, unsigned int block_id,
1425 unsigned int *bytes_returned)
1426 {
1427 struct hv_pcibus_device *hbus =
1428 container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1429 sysdata);
1430 struct {
1431 struct pci_packet pkt;
1432 char buf[sizeof(struct pci_read_block)];
1433 } pkt;
1434 struct hv_read_config_compl comp_pkt;
1435 struct pci_read_block *read_blk;
1436 int ret;
1437
1438 if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1439 return -EINVAL;
1440
1441 init_completion(&comp_pkt.comp_pkt.host_event);
1442 comp_pkt.buf = buf;
1443 comp_pkt.len = len;
1444
1445 memset(&pkt, 0, sizeof(pkt));
1446 pkt.pkt.completion_func = hv_pci_read_config_compl;
1447 pkt.pkt.compl_ctxt = &comp_pkt;
1448 read_blk = (struct pci_read_block *)&pkt.pkt.message;
1449 read_blk->message_type.type = PCI_READ_BLOCK;
1450 read_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1451 read_blk->block_id = block_id;
1452 read_blk->bytes_requested = len;
1453
1454 ret = vmbus_sendpacket(hbus->hdev->channel, read_blk,
1455 sizeof(*read_blk), (unsigned long)&pkt.pkt,
1456 VM_PKT_DATA_INBAND,
1457 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1458 if (ret)
1459 return ret;
1460
1461 ret = wait_for_response(hbus->hdev, &comp_pkt.comp_pkt.host_event);
1462 if (ret)
1463 return ret;
1464
1465 if (comp_pkt.comp_pkt.completion_status != 0 ||
1466 comp_pkt.bytes_returned == 0) {
1467 dev_err(&hbus->hdev->device,
1468 "Read Config Block failed: 0x%x, bytes_returned=%d\n",
1469 comp_pkt.comp_pkt.completion_status,
1470 comp_pkt.bytes_returned);
1471 return -EIO;
1472 }
1473
1474 *bytes_returned = comp_pkt.bytes_returned;
1475 return 0;
1476 }
1477
1478 /**
1479 * hv_pci_write_config_compl() - Invoked when a response packet for a write
1480 * config block operation arrives.
1481 * @context: Identifies the write config operation
1482 * @resp: The response packet itself
1483 * @resp_packet_size: Size in bytes of the response packet
1484 */
hv_pci_write_config_compl(void * context,struct pci_response * resp,int resp_packet_size)1485 static void hv_pci_write_config_compl(void *context, struct pci_response *resp,
1486 int resp_packet_size)
1487 {
1488 struct hv_pci_compl *comp_pkt = context;
1489
1490 comp_pkt->completion_status = resp->status;
1491 complete(&comp_pkt->host_event);
1492 }
1493
1494 /**
1495 * hv_write_config_block() - Sends a write config block request to the
1496 * back-end driver running in the Hyper-V parent partition.
1497 * @pdev: The PCI driver's representation for this device.
1498 * @buf: Buffer from which the config block will be copied.
1499 * @len: Size in bytes of buf.
1500 * @block_id: Identifies the config block which is being written.
1501 *
1502 * Return: 0 on success, -errno on failure
1503 */
hv_write_config_block(struct pci_dev * pdev,void * buf,unsigned int len,unsigned int block_id)1504 static int hv_write_config_block(struct pci_dev *pdev, void *buf,
1505 unsigned int len, unsigned int block_id)
1506 {
1507 struct hv_pcibus_device *hbus =
1508 container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1509 sysdata);
1510 struct {
1511 struct pci_packet pkt;
1512 char buf[sizeof(struct pci_write_block)];
1513 u32 reserved;
1514 } pkt;
1515 struct hv_pci_compl comp_pkt;
1516 struct pci_write_block *write_blk;
1517 u32 pkt_size;
1518 int ret;
1519
1520 if (len == 0 || len > HV_CONFIG_BLOCK_SIZE_MAX)
1521 return -EINVAL;
1522
1523 init_completion(&comp_pkt.host_event);
1524
1525 memset(&pkt, 0, sizeof(pkt));
1526 pkt.pkt.completion_func = hv_pci_write_config_compl;
1527 pkt.pkt.compl_ctxt = &comp_pkt;
1528 write_blk = (struct pci_write_block *)&pkt.pkt.message;
1529 write_blk->message_type.type = PCI_WRITE_BLOCK;
1530 write_blk->wslot.slot = devfn_to_wslot(pdev->devfn);
1531 write_blk->block_id = block_id;
1532 write_blk->byte_count = len;
1533 memcpy(write_blk->bytes, buf, len);
1534 pkt_size = offsetof(struct pci_write_block, bytes) + len;
1535 /*
1536 * This quirk is required on some hosts shipped around 2018, because
1537 * these hosts don't check the pkt_size correctly (new hosts have been
1538 * fixed since early 2019). The quirk is also safe on very old hosts
1539 * and new hosts, because, on them, what really matters is the length
1540 * specified in write_blk->byte_count.
1541 */
1542 pkt_size += sizeof(pkt.reserved);
1543
1544 ret = vmbus_sendpacket(hbus->hdev->channel, write_blk, pkt_size,
1545 (unsigned long)&pkt.pkt, VM_PKT_DATA_INBAND,
1546 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1547 if (ret)
1548 return ret;
1549
1550 ret = wait_for_response(hbus->hdev, &comp_pkt.host_event);
1551 if (ret)
1552 return ret;
1553
1554 if (comp_pkt.completion_status != 0) {
1555 dev_err(&hbus->hdev->device,
1556 "Write Config Block failed: 0x%x\n",
1557 comp_pkt.completion_status);
1558 return -EIO;
1559 }
1560
1561 return 0;
1562 }
1563
1564 /**
1565 * hv_register_block_invalidate() - Invoked when a config block invalidation
1566 * arrives from the back-end driver.
1567 * @pdev: The PCI driver's representation for this device.
1568 * @context: Identifies the device.
1569 * @block_invalidate: Identifies all of the blocks being invalidated.
1570 *
1571 * Return: 0 on success, -errno on failure
1572 */
hv_register_block_invalidate(struct pci_dev * pdev,void * context,void (* block_invalidate)(void * context,u64 block_mask))1573 static int hv_register_block_invalidate(struct pci_dev *pdev, void *context,
1574 void (*block_invalidate)(void *context,
1575 u64 block_mask))
1576 {
1577 struct hv_pcibus_device *hbus =
1578 container_of(pdev->bus->sysdata, struct hv_pcibus_device,
1579 sysdata);
1580 struct hv_pci_dev *hpdev;
1581
1582 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1583 if (!hpdev)
1584 return -ENODEV;
1585
1586 hpdev->block_invalidate = block_invalidate;
1587 hpdev->invalidate_context = context;
1588
1589 put_pcichild(hpdev);
1590 return 0;
1591
1592 }
1593
1594 /* Interrupt management hooks */
hv_int_desc_free(struct hv_pci_dev * hpdev,struct tran_int_desc * int_desc)1595 static void hv_int_desc_free(struct hv_pci_dev *hpdev,
1596 struct tran_int_desc *int_desc)
1597 {
1598 struct pci_delete_interrupt *int_pkt;
1599 struct {
1600 struct pci_packet pkt;
1601 u8 buffer[sizeof(struct pci_delete_interrupt)];
1602 } ctxt;
1603
1604 if (!int_desc->vector_count) {
1605 kfree(int_desc);
1606 return;
1607 }
1608 memset(&ctxt, 0, sizeof(ctxt));
1609 int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message;
1610 int_pkt->message_type.type =
1611 PCI_DELETE_INTERRUPT_MESSAGE;
1612 int_pkt->wslot.slot = hpdev->desc.win_slot.slot;
1613 int_pkt->int_desc = *int_desc;
1614 vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt),
1615 0, VM_PKT_DATA_INBAND, 0);
1616 kfree(int_desc);
1617 }
1618
1619 /**
1620 * hv_msi_free() - Free the MSI.
1621 * @domain: The interrupt domain pointer
1622 * @info: Extra MSI-related context
1623 * @irq: Identifies the IRQ.
1624 *
1625 * The Hyper-V parent partition and hypervisor are tracking the
1626 * messages that are in use, keeping the interrupt redirection
1627 * table up to date. This callback sends a message that frees
1628 * the IRT entry and related tracking nonsense.
1629 */
hv_msi_free(struct irq_domain * domain,struct msi_domain_info * info,unsigned int irq)1630 static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
1631 unsigned int irq)
1632 {
1633 struct hv_pcibus_device *hbus;
1634 struct hv_pci_dev *hpdev;
1635 struct pci_dev *pdev;
1636 struct tran_int_desc *int_desc;
1637 struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq);
1638 struct msi_desc *msi = irq_data_get_msi_desc(irq_data);
1639
1640 pdev = msi_desc_to_pci_dev(msi);
1641 hbus = info->data;
1642 int_desc = irq_data_get_irq_chip_data(irq_data);
1643 if (!int_desc)
1644 return;
1645
1646 irq_data->chip_data = NULL;
1647 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1648 if (!hpdev) {
1649 kfree(int_desc);
1650 return;
1651 }
1652
1653 hv_int_desc_free(hpdev, int_desc);
1654 put_pcichild(hpdev);
1655 }
1656
hv_irq_mask(struct irq_data * data)1657 static void hv_irq_mask(struct irq_data *data)
1658 {
1659 pci_msi_mask_irq(data);
1660 if (data->parent_data->chip->irq_mask)
1661 irq_chip_mask_parent(data);
1662 }
1663
hv_irq_unmask(struct irq_data * data)1664 static void hv_irq_unmask(struct irq_data *data)
1665 {
1666 hv_arch_irq_unmask(data);
1667
1668 if (data->parent_data->chip->irq_unmask)
1669 irq_chip_unmask_parent(data);
1670 pci_msi_unmask_irq(data);
1671 }
1672
1673 struct compose_comp_ctxt {
1674 struct hv_pci_compl comp_pkt;
1675 struct tran_int_desc int_desc;
1676 };
1677
hv_pci_compose_compl(void * context,struct pci_response * resp,int resp_packet_size)1678 static void hv_pci_compose_compl(void *context, struct pci_response *resp,
1679 int resp_packet_size)
1680 {
1681 struct compose_comp_ctxt *comp_pkt = context;
1682 struct pci_create_int_response *int_resp =
1683 (struct pci_create_int_response *)resp;
1684
1685 if (resp_packet_size < sizeof(*int_resp)) {
1686 comp_pkt->comp_pkt.completion_status = -1;
1687 goto out;
1688 }
1689 comp_pkt->comp_pkt.completion_status = resp->status;
1690 comp_pkt->int_desc = int_resp->int_desc;
1691 out:
1692 complete(&comp_pkt->comp_pkt.host_event);
1693 }
1694
hv_compose_msi_req_v1(struct pci_create_interrupt * int_pkt,u32 slot,u8 vector,u16 vector_count)1695 static u32 hv_compose_msi_req_v1(
1696 struct pci_create_interrupt *int_pkt,
1697 u32 slot, u8 vector, u16 vector_count)
1698 {
1699 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE;
1700 int_pkt->wslot.slot = slot;
1701 int_pkt->int_desc.vector = vector;
1702 int_pkt->int_desc.vector_count = vector_count;
1703 int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1704
1705 /*
1706 * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
1707 * hv_irq_unmask().
1708 */
1709 int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL;
1710
1711 return sizeof(*int_pkt);
1712 }
1713
1714 /*
1715 * The vCPU selected by hv_compose_multi_msi_req_get_cpu() and
1716 * hv_compose_msi_req_get_cpu() is a "dummy" vCPU because the final vCPU to be
1717 * interrupted is specified later in hv_irq_unmask() and communicated to Hyper-V
1718 * via the HVCALL_RETARGET_INTERRUPT hypercall. But the choice of dummy vCPU is
1719 * not irrelevant because Hyper-V chooses the physical CPU to handle the
1720 * interrupts based on the vCPU specified in message sent to the vPCI VSP in
1721 * hv_compose_msi_msg(). Hyper-V's choice of pCPU is not visible to the guest,
1722 * but assigning too many vPCI device interrupts to the same pCPU can cause a
1723 * performance bottleneck. So we spread out the dummy vCPUs to influence Hyper-V
1724 * to spread out the pCPUs that it selects.
1725 *
1726 * For the single-MSI and MSI-X cases, it's OK for hv_compose_msi_req_get_cpu()
1727 * to always return the same dummy vCPU, because a second call to
1728 * hv_compose_msi_msg() contains the "real" vCPU, causing Hyper-V to choose a
1729 * new pCPU for the interrupt. But for the multi-MSI case, the second call to
1730 * hv_compose_msi_msg() exits without sending a message to the vPCI VSP, so the
1731 * original dummy vCPU is used. This dummy vCPU must be round-robin'ed so that
1732 * the pCPUs are spread out. All interrupts for a given multi-MSI device still
1733 * end up on the same pCPU, even though the vCPUs are spread out by later
1734 * calls to hv_irq_unmask(); that is the best we can do now.
1735 *
1736 * With Hyper-V in Nov 2022, the HVCALL_RETARGET_INTERRUPT hypercall does *not*
1737 * cause Hyper-V to reselect the pCPU based on the specified vCPU. Such an
1738 * enhancement is planned for a future version. With that enhancement, the
1739 * dummy vCPU selection won't matter, and interrupts for the same multi-MSI
1740 * device will be spread across multiple pCPUs.
1741 */
1742
1743 /*
1744 * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten
1745 * by subsequent retarget in hv_irq_unmask().
1746 */
1747 static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity)
1748 {
1749 return cpumask_first_and(affinity, cpu_online_mask);
1750 }
1751
1752 /*
1753 * Make sure the dummy vCPU values for multi-MSI don't all point to vCPU0.
1754 */
1755 static int hv_compose_multi_msi_req_get_cpu(void)
1756 {
1757 static DEFINE_SPINLOCK(multi_msi_cpu_lock);
1758
1759 /* -1 means starting with CPU 0 */
1760 static int cpu_next = -1;
1761
1762 unsigned long flags;
1763 int cpu;
1764
1765 spin_lock_irqsave(&multi_msi_cpu_lock, flags);
1766
1767 cpu_next = cpumask_next_wrap(cpu_next, cpu_online_mask, nr_cpu_ids,
1768 false);
1769 cpu = cpu_next;
1770
1771 spin_unlock_irqrestore(&multi_msi_cpu_lock, flags);
1772
1773 return cpu;
1774 }
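/*
 * Illustration, not normative: with vCPUs 0-3 online, successive calls
 * return 0, 1, 2, 3, 0, ... because cpu_next starts at -1 and
 * cpumask_next_wrap() wraps past the last online CPU. Each multi-MSI
 * device composing its first message therefore names a different dummy
 * vCPU, nudging Hyper-V toward selecting distinct pCPUs.
 */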
1775
1776 static u32 hv_compose_msi_req_v2(
1777 struct pci_create_interrupt2 *int_pkt, int cpu,
1778 u32 slot, u8 vector, u16 vector_count)
1779 {
1780 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2;
1781 int_pkt->wslot.slot = slot;
1782 int_pkt->int_desc.vector = vector;
1783 int_pkt->int_desc.vector_count = vector_count;
1784 int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1785 int_pkt->int_desc.processor_array[0] =
1786 hv_cpu_number_to_vp_number(cpu);
1787 int_pkt->int_desc.processor_count = 1;
1788
1789 return sizeof(*int_pkt);
1790 }
1791
1792 static u32 hv_compose_msi_req_v3(
1793 struct pci_create_interrupt3 *int_pkt, int cpu,
1794 u32 slot, u32 vector, u16 vector_count)
1795 {
1796 int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE3;
1797 int_pkt->wslot.slot = slot;
1798 int_pkt->int_desc.vector = vector;
1799 int_pkt->int_desc.reserved = 0;
1800 int_pkt->int_desc.vector_count = vector_count;
1801 int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
1802 int_pkt->int_desc.processor_array[0] =
1803 hv_cpu_number_to_vp_number(cpu);
1804 int_pkt->int_desc.processor_count = 1;
1805
1806 return sizeof(*int_pkt);
1807 }
1808
1809 /**
1810 * hv_compose_msi_msg() - Supplies a valid MSI address/data
1811 * @data: Everything about this MSI
1812 * @msg: Buffer that is filled in by this function
1813 *
1814 * This function unpacks the IRQ, looking for the target CPU set, IDT
1815 * vector and mode, and sends a message to the parent partition
1816 * asking for a mapping of that tuple in this partition. The
1817 * response supplies a data value and address to which that data
1818 * should be written to trigger that interrupt.
1819 */
1820 static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1821 {
1822 struct hv_pcibus_device *hbus;
1823 struct vmbus_channel *channel;
1824 struct hv_pci_dev *hpdev;
1825 struct pci_bus *pbus;
1826 struct pci_dev *pdev;
1827 const struct cpumask *dest;
1828 struct compose_comp_ctxt comp;
1829 struct tran_int_desc *int_desc;
1830 struct msi_desc *msi_desc;
1831 /*
1832 * vector_count should be u16: see hv_msi_desc, hv_msi_desc2
1833 * and hv_msi_desc3. vector must be u32: see hv_msi_desc3.
1834 */
1835 u16 vector_count;
1836 u32 vector;
1837 struct {
1838 struct pci_packet pci_pkt;
1839 union {
1840 struct pci_create_interrupt v1;
1841 struct pci_create_interrupt2 v2;
1842 struct pci_create_interrupt3 v3;
1843 } int_pkts;
1844 } __packed ctxt;
1845 bool multi_msi;
1846 u64 trans_id;
1847 u32 size;
1848 int ret;
1849 int cpu;
1850
1851 msi_desc = irq_data_get_msi_desc(data);
1852 multi_msi = !msi_desc->pci.msi_attrib.is_msix &&
1853 msi_desc->nvec_used > 1;
1854
1855 /* Reuse the previous allocation */
1856 if (data->chip_data && multi_msi) {
1857 int_desc = data->chip_data;
1858 msg->address_hi = int_desc->address >> 32;
1859 msg->address_lo = int_desc->address & 0xffffffff;
1860 msg->data = int_desc->data;
1861 return;
1862 }
1863
1864 pdev = msi_desc_to_pci_dev(msi_desc);
1865 dest = irq_data_get_effective_affinity_mask(data);
1866 pbus = pdev->bus;
1867 hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata);
1868 channel = hbus->hdev->channel;
1869 hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn));
1870 if (!hpdev)
1871 goto return_null_message;
1872
1873 /* Free any previous message that might have already been composed. */
1874 if (data->chip_data && !multi_msi) {
1875 int_desc = data->chip_data;
1876 data->chip_data = NULL;
1877 hv_int_desc_free(hpdev, int_desc);
1878 }
1879
1880 int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC);
1881 if (!int_desc)
1882 goto drop_reference;
1883
1884 if (multi_msi) {
1885 /*
1886 * If this is not the first MSI of a multi-MSI allocation, we
1887 * already have a mapping and can exit early.
1888 */
1889 if (msi_desc->irq != data->irq) {
1890 data->chip_data = int_desc;
1891 int_desc->address = msi_desc->msg.address_lo |
1892 (u64)msi_desc->msg.address_hi << 32;
1893 int_desc->data = msi_desc->msg.data +
1894 (data->irq - msi_desc->irq);
1895 msg->address_hi = msi_desc->msg.address_hi;
1896 msg->address_lo = msi_desc->msg.address_lo;
1897 msg->data = int_desc->data;
1898 put_pcichild(hpdev);
1899 return;
1900 }
1901 /*
1902 * The vector we select here is a dummy value. The correct
1903 * value gets sent to the hypervisor in unmask(). The dummy
1904 * must be non-zero and aligned to the vector count; multi-MSI
1905 * counts are powers of 2 up to 32, so 32 always works here.
1906 */
1907 vector = 32;
1908 vector_count = msi_desc->nvec_used;
1909 cpu = hv_compose_multi_msi_req_get_cpu();
1910 } else {
1911 vector = hv_msi_get_int_vector(data);
1912 vector_count = 1;
1913 cpu = hv_compose_msi_req_get_cpu(dest);
1914 }
1915
1916 /*
1917 * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector'
1918 * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly
1919 * for better readability.
1920 */
1921 memset(&ctxt, 0, sizeof(ctxt));
1922 init_completion(&comp.comp_pkt.host_event);
1923 ctxt.pci_pkt.completion_func = hv_pci_compose_compl;
1924 ctxt.pci_pkt.compl_ctxt = &comp;
1925
1926 switch (hbus->protocol_version) {
1927 case PCI_PROTOCOL_VERSION_1_1:
1928 size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
1929 hpdev->desc.win_slot.slot,
1930 (u8)vector,
1931 vector_count);
1932 break;
1933
1934 case PCI_PROTOCOL_VERSION_1_2:
1935 case PCI_PROTOCOL_VERSION_1_3:
1936 size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
1937 cpu,
1938 hpdev->desc.win_slot.slot,
1939 (u8)vector,
1940 vector_count);
1941 break;
1942
1943 case PCI_PROTOCOL_VERSION_1_4:
1944 size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
1945 cpu,
1946 hpdev->desc.win_slot.slot,
1947 vector,
1948 vector_count);
1949 break;
1950
1951 default:
1952 /* As we only negotiate protocol versions known to this driver,
1953 * this path should never be hit. However, this is not a hot
1954 * path, so we print a message to aid future updates.
1955 */
1956 dev_err(&hbus->hdev->device,
1957 "Unexpected vPCI protocol, update driver.");
1958 goto free_int_desc;
1959 }
1960
1961 ret = vmbus_sendpacket_getid(hpdev->hbus->hdev->channel, &ctxt.int_pkts,
1962 size, (unsigned long)&ctxt.pci_pkt,
1963 &trans_id, VM_PKT_DATA_INBAND,
1964 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1965 if (ret) {
1966 dev_err(&hbus->hdev->device,
1967 "Sending request for interrupt failed: 0x%x",
1968 ret);
1969 goto free_int_desc;
1970 }
1971
1972 /*
1973 * Prevents hv_pci_onchannelcallback() from running concurrently
1974 * in the tasklet.
1975 */
1976 tasklet_disable_in_atomic(&channel->callback_event);
1977
1978 /*
1979 * Since this function is called with IRQ locks held, can't
1980 * do normal wait for completion; instead poll.
1981 */
1982 while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
1983 unsigned long flags;
1984
1985 /* 0xFFFF means an invalid PCI VENDOR ID. */
1986 if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
1987 dev_err_once(&hbus->hdev->device,
1988 "the device has gone\n");
1989 goto enable_tasklet;
1990 }
1991
1992 /*
1993 * Make sure that the ring buffer data structure doesn't get
1994 * freed while we dereference the ring buffer pointer. Test
1995 * for the channel's onchannel_callback being NULL within a
1996 * sched_lock critical section. See also the inline comments
1997 * in vmbus_reset_channel_cb().
1998 */
1999 spin_lock_irqsave(&channel->sched_lock, flags);
2000 if (unlikely(channel->onchannel_callback == NULL)) {
2001 spin_unlock_irqrestore(&channel->sched_lock, flags);
2002 goto enable_tasklet;
2003 }
2004 hv_pci_onchannelcallback(hbus);
2005 spin_unlock_irqrestore(&channel->sched_lock, flags);
2006
2007 udelay(100);
2008 }
2009
2010 tasklet_enable(&channel->callback_event);
2011
2012 if (comp.comp_pkt.completion_status < 0) {
2013 dev_err(&hbus->hdev->device,
2014 "Request for interrupt failed: 0x%x",
2015 comp.comp_pkt.completion_status);
2016 goto free_int_desc;
2017 }
2018
2019 /*
2020 * Record the assignment so that this can be unwound later. Using
2021 * irq_set_chip_data() here would be appropriate, but the lock it takes
2022 * is already held.
2023 */
2024 *int_desc = comp.int_desc;
2025 data->chip_data = int_desc;
2026
2027 /* Pass up the result. */
2028 msg->address_hi = comp.int_desc.address >> 32;
2029 msg->address_lo = comp.int_desc.address & 0xffffffff;
2030 msg->data = comp.int_desc.data;
2031
2032 put_pcichild(hpdev);
2033 return;
2034
2035 enable_tasklet:
2036 tasklet_enable(&channel->callback_event);
2037 /*
2038 * The completion packet on the stack becomes invalid after 'return';
2039 * remove the ID from the VMbus requestor if the identifier is still
2040 * mapped to/associated with the packet. (The identifier could have
2041 * been 're-used', i.e., already removed and (re-)mapped.)
2042 *
2043 * Cf. hv_pci_onchannelcallback().
2044 */
2045 vmbus_request_addr_match(channel, trans_id, (unsigned long)&ctxt.pci_pkt);
2046 free_int_desc:
2047 kfree(int_desc);
2048 drop_reference:
2049 put_pcichild(hpdev);
2050 return_null_message:
2051 msg->address_hi = 0;
2052 msg->address_lo = 0;
2053 msg->data = 0;
2054 }
2055
2056 /* HW Interrupt Chip Descriptor */
2057 static struct irq_chip hv_msi_irq_chip = {
2058 .name = "Hyper-V PCIe MSI",
2059 .irq_compose_msi_msg = hv_compose_msi_msg,
2060 .irq_set_affinity = irq_chip_set_affinity_parent,
2061 #ifdef CONFIG_X86
2062 .irq_ack = irq_chip_ack_parent,
2063 #elif defined(CONFIG_ARM64)
2064 .irq_eoi = irq_chip_eoi_parent,
2065 #endif
2066 .irq_mask = hv_irq_mask,
2067 .irq_unmask = hv_irq_unmask,
2068 };
2069
2070 static struct msi_domain_ops hv_msi_ops = {
2071 .msi_prepare = hv_msi_prepare,
2072 .msi_free = hv_msi_free,
2073 };
2074
2075 /**
2076 * hv_pcie_init_irq_domain() - Initialize IRQ domain
2077 * @hbus: The root PCI bus
2078 *
2079 * This function creates an IRQ domain which will be used for
2080 * interrupts from devices that have been passed through. These
2081 * devices only support MSI and MSI-X, not line-based interrupts
2082 * or simulations of line-based interrupts through PCIe's
2083 * fabric-layer messages. Because interrupts are remapped, we
2084 * can support multi-message MSI here.
2085 *
2086 * Return: '0' on success and error value on failure
2087 */
2088 static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
2089 {
2090 hbus->msi_info.chip = &hv_msi_irq_chip;
2091 hbus->msi_info.ops = &hv_msi_ops;
2092 hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
2093 MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
2094 MSI_FLAG_PCI_MSIX);
2095 hbus->msi_info.handler = FLOW_HANDLER;
2096 hbus->msi_info.handler_name = FLOW_NAME;
2097 hbus->msi_info.data = hbus;
2098 hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
2099 &hbus->msi_info,
2100 hv_pci_get_root_domain());
2101 if (!hbus->irq_domain) {
2102 dev_err(&hbus->hdev->device,
2103 "Failed to build an MSI IRQ domain\n");
2104 return -ENODEV;
2105 }
2106
2107 dev_set_msi_domain(&hbus->bridge->dev, hbus->irq_domain);
2108
2109 return 0;
2110 }
2111
2112 /**
2113 * get_bar_size() - Get the address space consumed by a BAR
2114 * @bar_val: Value that a BAR returned after -1 was written
2115 * to it.
2116 *
2117 * This function returns the size of the BAR, rounded up to 1
2118 * page. It has to be rounded up because the hypervisor's page
2119 * table entry that maps the BAR into the VM can't specify an
2120 * offset within a page. The invariant is that the hypervisor
2121 * must place any BAR smaller than a page at the beginning
2122 * of a page.
2123 *
2124 * Return: Size in bytes of the consumed MMIO space.
2125 */
2126 static u64 get_bar_size(u64 bar_val)
2127 {
2128 return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)),
2129 PAGE_SIZE);
2130 }
2131
2132 /**
2133 * survey_child_resources() - Total all MMIO requirements
2134 * @hbus: Root PCI bus, as understood by this driver
2135 */
2136 static void survey_child_resources(struct hv_pcibus_device *hbus)
2137 {
2138 struct hv_pci_dev *hpdev;
2139 resource_size_t bar_size = 0;
2140 unsigned long flags;
2141 struct completion *event;
2142 u64 bar_val;
2143 int i;
2144
2145 /* If nobody is waiting on the answer, don't compute it. */
2146 event = xchg(&hbus->survey_event, NULL);
2147 if (!event)
2148 return;
2149
2150 /* If the answer has already been computed, go with it. */
2151 if (hbus->low_mmio_space || hbus->high_mmio_space) {
2152 complete(event);
2153 return;
2154 }
2155
2156 spin_lock_irqsave(&hbus->device_list_lock, flags);
2157
2158 /*
2159 * Due to an interesting quirk of the PCI spec, all memory regions
2160 * for a child device are a power of 2 in size and aligned in memory,
2161 * so it's sufficient to just add them up without tracking alignment.
2162 */
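	/*
	 * For example (hypothetical sizes): BARs of 64 KiB, 16 KiB and
	 * 4 KiB sum to 84 KiB. Because they are later packed in
	 * descending size order from a base aligned to the largest of
	 * them (see prepopulate_bars() below), every BAR lands naturally
	 * aligned, so the plain sum is a valid budget.
	 */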
2163 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2164 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2165 if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO)
2166 dev_err(&hbus->hdev->device,
2167 "There's an I/O BAR in this list!\n");
2168
2169 if (hpdev->probed_bar[i] != 0) {
2170 /*
2171 * A probed BAR has all the upper bits set that
2172 * can be changed.
2173 */
2174
2175 bar_val = hpdev->probed_bar[i];
2176 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2177 bar_val |=
2178 ((u64)hpdev->probed_bar[++i] << 32);
2179 else
2180 bar_val |= 0xffffffff00000000ULL;
2181
2182 bar_size = get_bar_size(bar_val);
2183
2184 if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64)
2185 hbus->high_mmio_space += bar_size;
2186 else
2187 hbus->low_mmio_space += bar_size;
2188 }
2189 }
2190 }
2191
2192 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2193 complete(event);
2194 }
2195
2196 /**
2197 * prepopulate_bars() - Fill in BARs with defaults
2198 * @hbus: Root PCI bus, as understood by this driver
2199 *
2200 * The core PCI driver code seems much, much happier if the BARs
2201 * for a device have values upon first scan. So fill them in.
2202 * The algorithm below works down from large sizes to small,
2203 * attempting to pack the assignments optimally. The assumption,
2204 * enforced in other parts of the code, is that the beginning of
2205 * the memory-mapped I/O space will be aligned on the largest
2206 * BAR size.
2207 */
2208 static void prepopulate_bars(struct hv_pcibus_device *hbus)
2209 {
2210 resource_size_t high_size = 0;
2211 resource_size_t low_size = 0;
2212 resource_size_t high_base = 0;
2213 resource_size_t low_base = 0;
2214 resource_size_t bar_size;
2215 struct hv_pci_dev *hpdev;
2216 unsigned long flags;
2217 u64 bar_val;
2218 u32 command;
2219 bool high;
2220 int i;
2221
2222 if (hbus->low_mmio_space) {
2223 low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
2224 low_base = hbus->low_mmio_res->start;
2225 }
2226
2227 if (hbus->high_mmio_space) {
2228 high_size = 1ULL <<
2229 (63 - __builtin_clzll(hbus->high_mmio_space));
2230 high_base = hbus->high_mmio_res->start;
2231 }
2232
2233 spin_lock_irqsave(&hbus->device_list_lock, flags);
2234
2235 /*
2236 * Clear the memory enable bit, in case it's already set. This occurs
2237 * in the suspend path of hibernation, where the device is suspended,
2238 * resumed and suspended again: see hibernation_snapshot() and
2239 * hibernation_platform_enter().
2240 *
2241 * If the memory enable bit is already set, Hyper-V silently ignores
2242 * the below BAR updates, and the related PCI device driver can not
2243 * work, because reading from the device register(s) always returns
2244 * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
2245 */
2246 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2247 _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
2248 command &= ~PCI_COMMAND_MEMORY;
2249 _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, command);
2250 }
2251
2252 /* Pick addresses for the BARs. */
2253 do {
2254 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2255 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2256 bar_val = hpdev->probed_bar[i];
2257 if (bar_val == 0)
2258 continue;
2259 high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64;
2260 if (high) {
2261 bar_val |=
2262 ((u64)hpdev->probed_bar[i + 1]
2263 << 32);
2264 } else {
2265 bar_val |= 0xffffffffULL << 32;
2266 }
2267 bar_size = get_bar_size(bar_val);
2268 if (high) {
2269 if (high_size != bar_size) {
2270 i++;
2271 continue;
2272 }
2273 _hv_pcifront_write_config(hpdev,
2274 PCI_BASE_ADDRESS_0 + (4 * i),
2275 4,
2276 (u32)(high_base & 0xffffff00));
2277 i++;
2278 _hv_pcifront_write_config(hpdev,
2279 PCI_BASE_ADDRESS_0 + (4 * i),
2280 4, (u32)(high_base >> 32));
2281 high_base += bar_size;
2282 } else {
2283 if (low_size != bar_size)
2284 continue;
2285 _hv_pcifront_write_config(hpdev,
2286 PCI_BASE_ADDRESS_0 + (4 * i),
2287 4,
2288 (u32)(low_base & 0xffffff00));
2289 low_base += bar_size;
2290 }
2291 }
2292 if (high_size <= 1 && low_size <= 1) {
2293 /*
2294 * No need to set the PCI_COMMAND_MEMORY bit as
2295 * the core PCI driver doesn't require the bit
2296 * to be pre-set. Actually here we intentionally
2297 * keep the bit off so that the PCI BAR probing
2298 * in the core PCI driver doesn't cause Hyper-V
2299 * to unnecessarily unmap/map the virtual BARs
2300 * from/to the physical BARs multiple times.
2301 * This reduces the VM boot time significantly
2302 * if the BAR sizes are huge.
2303 */
2304 break;
2305 }
2306 }
2307
2308 high_size >>= 1;
2309 low_size >>= 1;
2310 } while (high_size || low_size);
2311
2312 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2313 }
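/*
 * Sketch of the loop above with hypothetical numbers: low BARs of
 * 64 KiB, 16 KiB and 4 KiB make low_mmio_space 84 KiB, so low_size
 * starts at 64 KiB (the largest power of 2 not exceeding the total).
 * The first pass places the 64 KiB BAR at low_base; halving to 32 KiB
 * matches nothing; the 16 KiB and then 4 KiB BARs are placed at the
 * next free offsets, each naturally aligned because low_base is
 * aligned to the largest size and every earlier BAR was at least as
 * large.
 */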
2314
2315 /*
2316 * Assign entries in sysfs pci slot directory.
2317 *
2318 * Note that this function does not need to lock the children list
2319 * because it is called from pci_devices_present_work which
2320 * is serialized with hv_eject_device_work because they are on the
2321 * same ordered workqueue. Therefore hbus->children list will not change
2322 * even when pci_create_slot sleeps.
2323 */
2324 static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
2325 {
2326 struct hv_pci_dev *hpdev;
2327 char name[SLOT_NAME_SIZE];
2328 int slot_nr;
2329
2330 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2331 if (hpdev->pci_slot)
2332 continue;
2333
2334 slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
2335 snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
2336 hpdev->pci_slot = pci_create_slot(hbus->bridge->bus, slot_nr,
2337 name, NULL);
2338 if (IS_ERR(hpdev->pci_slot)) {
2339 pr_warn("pci_create_slot %s failed\n", name);
2340 hpdev->pci_slot = NULL;
2341 }
2342 }
2343 }
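/*
 * The slot name is the function's serial number, so a child device with
 * desc.ser == 1 would appear under /sys/bus/pci/slots/1 (assuming the
 * standard sysfs layout for PCI slots).
 */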
2344
2345 /*
2346 * Remove entries in sysfs pci slot directory.
2347 */
2348 static void hv_pci_remove_slots(struct hv_pcibus_device *hbus)
2349 {
2350 struct hv_pci_dev *hpdev;
2351
2352 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2353 if (!hpdev->pci_slot)
2354 continue;
2355 pci_destroy_slot(hpdev->pci_slot);
2356 hpdev->pci_slot = NULL;
2357 }
2358 }
2359
2360 /*
2361 * Set NUMA node for the devices on the bus
2362 */
2363 static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
2364 {
2365 struct pci_dev *dev;
2366 struct pci_bus *bus = hbus->bridge->bus;
2367 struct hv_pci_dev *hv_dev;
2368
2369 list_for_each_entry(dev, &bus->devices, bus_list) {
2370 hv_dev = get_pcichild_wslot(hbus, devfn_to_wslot(dev->devfn));
2371 if (!hv_dev)
2372 continue;
2373
2374 if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
2375 hv_dev->desc.virtual_numa_node < num_possible_nodes())
2376 /*
2377 * The kernel may boot with some NUMA nodes offline
2378 * (e.g. in a KDUMP kernel) or with NUMA disabled via
2379 * "numa=off". In those cases, adjust the host provided
2380 * NUMA node to a valid NUMA node used by the kernel.
2381 */
2382 set_dev_node(&dev->dev,
2383 numa_map_to_online_node(
2384 hv_dev->desc.virtual_numa_node));
2385
2386 put_pcichild(hv_dev);
2387 }
2388 }
2389
2390 /**
2391 * create_root_hv_pci_bus() - Expose a new root PCI bus
2392 * @hbus: Root PCI bus, as understood by this driver
2393 *
2394 * Return: 0 on success, -errno on failure
2395 */
2396 static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
2397 {
2398 int error;
2399 struct pci_host_bridge *bridge = hbus->bridge;
2400
2401 bridge->dev.parent = &hbus->hdev->device;
2402 bridge->sysdata = &hbus->sysdata;
2403 bridge->ops = &hv_pcifront_ops;
2404
2405 error = pci_scan_root_bus_bridge(bridge);
2406 if (error)
2407 return error;
2408
2409 pci_lock_rescan_remove();
2410 hv_pci_assign_numa_node(hbus);
2411 pci_bus_assign_resources(bridge->bus);
2412 hv_pci_assign_slots(hbus);
2413 pci_bus_add_devices(bridge->bus);
2414 pci_unlock_rescan_remove();
2415 hbus->state = hv_pcibus_installed;
2416 return 0;
2417 }
2418
2419 struct q_res_req_compl {
2420 struct completion host_event;
2421 struct hv_pci_dev *hpdev;
2422 };
2423
2424 /**
2425 * q_resource_requirements() - Query Resource Requirements
2426 * @context: The completion context.
2427 * @resp: The response that came from the host.
2428 * @resp_packet_size: The size in bytes of resp.
2429 *
2430 * This function is invoked on completion of a Query Resource
2431 * Requirements packet.
2432 */
2433 static void q_resource_requirements(void *context, struct pci_response *resp,
2434 int resp_packet_size)
2435 {
2436 struct q_res_req_compl *completion = context;
2437 struct pci_q_res_req_response *q_res_req =
2438 (struct pci_q_res_req_response *)resp;
2439 s32 status;
2440 int i;
2441
2442 status = (resp_packet_size < sizeof(*q_res_req)) ? -1 : resp->status;
2443 if (status < 0) {
2444 dev_err(&completion->hpdev->hbus->hdev->device,
2445 "query resource requirements failed: %x\n",
2446 status);
2447 } else {
2448 for (i = 0; i < PCI_STD_NUM_BARS; i++) {
2449 completion->hpdev->probed_bar[i] =
2450 q_res_req->probed_bar[i];
2451 }
2452 }
2453
2454 complete(&completion->host_event);
2455 }
2456
2457 /**
2458 * new_pcichild_device() - Create a new child device
2459 * @hbus: The internal struct tracking this root PCI bus.
2460 * @desc: The information supplied so far from the host
2461 * about the device.
2462 *
2463 * This function creates the tracking structure for a new child
2464 * device and kicks off the process of figuring out what it is.
2465 *
2466 * Return: Pointer to the new tracking struct
2467 */
2468 static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
2469 struct hv_pcidev_description *desc)
2470 {
2471 struct hv_pci_dev *hpdev;
2472 struct pci_child_message *res_req;
2473 struct q_res_req_compl comp_pkt;
2474 struct {
2475 struct pci_packet init_packet;
2476 u8 buffer[sizeof(struct pci_child_message)];
2477 } pkt;
2478 unsigned long flags;
2479 int ret;
2480
2481 hpdev = kzalloc(sizeof(*hpdev), GFP_KERNEL);
2482 if (!hpdev)
2483 return NULL;
2484
2485 hpdev->hbus = hbus;
2486
2487 memset(&pkt, 0, sizeof(pkt));
2488 init_completion(&comp_pkt.host_event);
2489 comp_pkt.hpdev = hpdev;
2490 pkt.init_packet.compl_ctxt = &comp_pkt;
2491 pkt.init_packet.completion_func = q_resource_requirements;
2492 res_req = (struct pci_child_message *)&pkt.init_packet.message;
2493 res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS;
2494 res_req->wslot.slot = desc->win_slot.slot;
2495
2496 ret = vmbus_sendpacket(hbus->hdev->channel, res_req,
2497 sizeof(struct pci_child_message),
2498 (unsigned long)&pkt.init_packet,
2499 VM_PKT_DATA_INBAND,
2500 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
2501 if (ret)
2502 goto error;
2503
2504 if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
2505 goto error;
2506
2507 hpdev->desc = *desc;
2508 refcount_set(&hpdev->refs, 1);
2509 get_pcichild(hpdev);
2510 spin_lock_irqsave(&hbus->device_list_lock, flags);
2511
2512 list_add_tail(&hpdev->list_entry, &hbus->children);
2513 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2514 return hpdev;
2515
2516 error:
2517 kfree(hpdev);
2518 return NULL;
2519 }
2520
2521 /**
2522 * get_pcichild_wslot() - Find device from slot
2523 * @hbus: Root PCI bus, as understood by this driver
2524 * @wslot: Location on the bus
2525 *
2526 * This function looks up a PCI device and returns the internal
2527 * representation of it. It acquires a reference on it, so that
2528 * the device won't be deleted while somebody is using it. The
2529 * caller is responsible for calling put_pcichild() to release
2530 * this reference.
2531 *
2532 * Return: Internal representation of a PCI device
2533 */
2534 static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
2535 u32 wslot)
2536 {
2537 unsigned long flags;
2538 struct hv_pci_dev *iter, *hpdev = NULL;
2539
2540 spin_lock_irqsave(&hbus->device_list_lock, flags);
2541 list_for_each_entry(iter, &hbus->children, list_entry) {
2542 if (iter->desc.win_slot.slot == wslot) {
2543 hpdev = iter;
2544 get_pcichild(hpdev);
2545 break;
2546 }
2547 }
2548 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2549
2550 return hpdev;
2551 }
2552
2553 /**
2554 * pci_devices_present_work() - Handle new list of child devices
2555 * @work: Work struct embedded in struct hv_dr_work
2556 *
2557 * "Bus Relations" is the Windows term for "children of this
2558 * bus." The terminology is preserved here for people trying to
2559 * debug the interaction between Hyper-V and Linux. This
2560 * function is called when the parent partition reports a list
2561 * of functions that should be observed under this PCI Express
2562 * port (bus).
2563 *
2564 * This function updates the list, and must tolerate being
2565 * called multiple times with the same information. The typical
2566 * number of child devices is one, with very atypical cases
2567 * involving three or four, so the algorithms used here can be
2568 * simple and inefficient.
2569 *
2570 * It must also treat the omission of a previously observed device as
2571 * notification that the device no longer exists.
2572 *
2573 * Note that this function is serialized with hv_eject_device_work(),
2574 * because both are pushed to the ordered workqueue hbus->wq.
2575 */
2576 static void pci_devices_present_work(struct work_struct *work)
2577 {
2578 u32 child_no;
2579 bool found;
2580 struct hv_pcidev_description *new_desc;
2581 struct hv_pci_dev *hpdev;
2582 struct hv_pcibus_device *hbus;
2583 struct list_head removed;
2584 struct hv_dr_work *dr_wrk;
2585 struct hv_dr_state *dr = NULL;
2586 unsigned long flags;
2587
2588 dr_wrk = container_of(work, struct hv_dr_work, wrk);
2589 hbus = dr_wrk->bus;
2590 kfree(dr_wrk);
2591
2592 INIT_LIST_HEAD(&removed);
2593
2594 /* Pull this off the queue and process it if it was the last one. */
2595 spin_lock_irqsave(&hbus->device_list_lock, flags);
2596 while (!list_empty(&hbus->dr_list)) {
2597 dr = list_first_entry(&hbus->dr_list, struct hv_dr_state,
2598 list_entry);
2599 list_del(&dr->list_entry);
2600
2601 /* Throw this away if the list still has stuff in it. */
2602 if (!list_empty(&hbus->dr_list)) {
2603 kfree(dr);
2604 continue;
2605 }
2606 }
2607 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2608
2609 if (!dr)
2610 return;
2611
2612 mutex_lock(&hbus->state_lock);
2613
2614 /* First, mark all existing children as reported missing. */
2615 spin_lock_irqsave(&hbus->device_list_lock, flags);
2616 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2617 hpdev->reported_missing = true;
2618 }
2619 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2620
2621 /* Next, add back any reported devices. */
2622 for (child_no = 0; child_no < dr->device_count; child_no++) {
2623 found = false;
2624 new_desc = &dr->func[child_no];
2625
2626 spin_lock_irqsave(&hbus->device_list_lock, flags);
2627 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2628 if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) &&
2629 (hpdev->desc.v_id == new_desc->v_id) &&
2630 (hpdev->desc.d_id == new_desc->d_id) &&
2631 (hpdev->desc.ser == new_desc->ser)) {
2632 hpdev->reported_missing = false;
2633 found = true;
2634 }
2635 }
2636 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2637
2638 if (!found) {
2639 hpdev = new_pcichild_device(hbus, new_desc);
2640 if (!hpdev)
2641 dev_err(&hbus->hdev->device,
2642 "couldn't record a child device.\n");
2643 }
2644 }
2645
2646 /* Move missing children to a list on the stack. */
2647 spin_lock_irqsave(&hbus->device_list_lock, flags);
2648 do {
2649 found = false;
2650 list_for_each_entry(hpdev, &hbus->children, list_entry) {
2651 if (hpdev->reported_missing) {
2652 found = true;
2653 put_pcichild(hpdev);
2654 list_move_tail(&hpdev->list_entry, &removed);
2655 break;
2656 }
2657 }
2658 } while (found);
2659 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2660
2661 /* Delete everything that should no longer exist. */
2662 while (!list_empty(&removed)) {
2663 hpdev = list_first_entry(&removed, struct hv_pci_dev,
2664 list_entry);
2665 list_del(&hpdev->list_entry);
2666
2667 if (hpdev->pci_slot)
2668 pci_destroy_slot(hpdev->pci_slot);
2669
2670 put_pcichild(hpdev);
2671 }
2672
2673 switch (hbus->state) {
2674 case hv_pcibus_installed:
2675 /*
2676 * Tell the core to rescan bus
2677 * because there may have been changes.
2678 */
2679 pci_lock_rescan_remove();
2680 pci_scan_child_bus(hbus->bridge->bus);
2681 hv_pci_assign_numa_node(hbus);
2682 hv_pci_assign_slots(hbus);
2683 pci_unlock_rescan_remove();
2684 break;
2685
2686 case hv_pcibus_init:
2687 case hv_pcibus_probed:
2688 survey_child_resources(hbus);
2689 break;
2690
2691 default:
2692 break;
2693 }
2694
2695 mutex_unlock(&hbus->state_lock);
2696
2697 kfree(dr);
2698 }
2699
2700 /**
2701 * hv_pci_start_relations_work() - Queue work to start device discovery
2702 * @hbus: Root PCI bus, as understood by this driver
2703 * @dr: The list of children returned from host
2704 *
2705 * Return: 0 on success, -errno on failure
2706 */
2707 static int hv_pci_start_relations_work(struct hv_pcibus_device *hbus,
2708 struct hv_dr_state *dr)
2709 {
2710 struct hv_dr_work *dr_wrk;
2711 unsigned long flags;
2712 bool pending_dr;
2713
2714 if (hbus->state == hv_pcibus_removing) {
2715 dev_info(&hbus->hdev->device,
2716 "PCI VMBus BUS_RELATIONS: ignored\n");
2717 return -ENOENT;
2718 }
2719
2720 dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT);
2721 if (!dr_wrk)
2722 return -ENOMEM;
2723
2724 INIT_WORK(&dr_wrk->wrk, pci_devices_present_work);
2725 dr_wrk->bus = hbus;
2726
2727 spin_lock_irqsave(&hbus->device_list_lock, flags);
2728 /*
2729 * If pending_dr is true, we have already queued a work item,
2730 * which will see the new dr. Otherwise, we need to
2731 * queue a new one.
2732 */
2733 pending_dr = !list_empty(&hbus->dr_list);
2734 list_add_tail(&dr->list_entry, &hbus->dr_list);
2735 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2736
2737 if (pending_dr)
2738 kfree(dr_wrk);
2739 else
2740 queue_work(hbus->wq, &dr_wrk->wrk);
2741
2742 return 0;
2743 }
2744
2745 /**
2746 * hv_pci_devices_present() - Handle list of new children
2747 * @hbus: Root PCI bus, as understood by this driver
2748 * @relations: Packet from host listing children
2749 *
2750 * Process a new list of devices on the bus. The list of devices is
2751 * discovered by the VSP and sent to us via the VSP message
2752 * PCI_BUS_RELATIONS whenever the set of devices for this bus changes.
2753 */
2754 static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
2755 struct pci_bus_relations *relations)
2756 {
2757 struct hv_dr_state *dr;
2758 int i;
2759
2760 dr = kzalloc(struct_size(dr, func, relations->device_count),
2761 GFP_NOWAIT);
2762 if (!dr)
2763 return;
2764
2765 dr->device_count = relations->device_count;
2766 for (i = 0; i < dr->device_count; i++) {
2767 dr->func[i].v_id = relations->func[i].v_id;
2768 dr->func[i].d_id = relations->func[i].d_id;
2769 dr->func[i].rev = relations->func[i].rev;
2770 dr->func[i].prog_intf = relations->func[i].prog_intf;
2771 dr->func[i].subclass = relations->func[i].subclass;
2772 dr->func[i].base_class = relations->func[i].base_class;
2773 dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2774 dr->func[i].win_slot = relations->func[i].win_slot;
2775 dr->func[i].ser = relations->func[i].ser;
2776 }
2777
2778 if (hv_pci_start_relations_work(hbus, dr))
2779 kfree(dr);
2780 }
2781
2782 /**
2783 * hv_pci_devices_present2() - Handle list of new children
2784 * @hbus: Root PCI bus, as understood by this driver
2785 * @relations: Packet from host listing children
2786 *
2787 * This function is the v2 version of hv_pci_devices_present()
2788 */
2789 static void hv_pci_devices_present2(struct hv_pcibus_device *hbus,
2790 struct pci_bus_relations2 *relations)
2791 {
2792 struct hv_dr_state *dr;
2793 int i;
2794
2795 dr = kzalloc(struct_size(dr, func, relations->device_count),
2796 GFP_NOWAIT);
2797 if (!dr)
2798 return;
2799
2800 dr->device_count = relations->device_count;
2801 for (i = 0; i < dr->device_count; i++) {
2802 dr->func[i].v_id = relations->func[i].v_id;
2803 dr->func[i].d_id = relations->func[i].d_id;
2804 dr->func[i].rev = relations->func[i].rev;
2805 dr->func[i].prog_intf = relations->func[i].prog_intf;
2806 dr->func[i].subclass = relations->func[i].subclass;
2807 dr->func[i].base_class = relations->func[i].base_class;
2808 dr->func[i].subsystem_id = relations->func[i].subsystem_id;
2809 dr->func[i].win_slot = relations->func[i].win_slot;
2810 dr->func[i].ser = relations->func[i].ser;
2811 dr->func[i].flags = relations->func[i].flags;
2812 dr->func[i].virtual_numa_node =
2813 relations->func[i].virtual_numa_node;
2814 }
2815
2816 if (hv_pci_start_relations_work(hbus, dr))
2817 kfree(dr);
2818 }
2819
2820 /**
2821 * hv_eject_device_work() - Asynchronously handles ejection
2822 * @work: Work struct embedded in internal device struct
2823 *
2824 * This function handles ejecting a device. Windows will
2825 * attempt to gracefully eject a device, waiting 60 seconds to
2826 * hear back from the guest OS that this completed successfully.
2827 * If this timer expires, the device will be forcibly removed.
2828 */
2829 static void hv_eject_device_work(struct work_struct *work)
2830 {
2831 struct pci_eject_response *ejct_pkt;
2832 struct hv_pcibus_device *hbus;
2833 struct hv_pci_dev *hpdev;
2834 struct pci_dev *pdev;
2835 unsigned long flags;
2836 int wslot;
2837 struct {
2838 struct pci_packet pkt;
2839 u8 buffer[sizeof(struct pci_eject_response)];
2840 } ctxt;
2841
2842 hpdev = container_of(work, struct hv_pci_dev, wrk);
2843 hbus = hpdev->hbus;
2844
2845 mutex_lock(&hbus->state_lock);
2846
2847 /*
2848 * Ejection can come before or after the PCI bus has been set up, so
2849 * attempt to find it and tear down the bus state, if it exists. This
2850 * must be done without constructs like pci_domain_nr(hbus->bridge->bus)
2851 * because hbus->bridge->bus may not exist yet.
2852 */
2853 wslot = wslot_to_devfn(hpdev->desc.win_slot.slot);
2854 pdev = pci_get_domain_bus_and_slot(hbus->bridge->domain_nr, 0, wslot);
2855 if (pdev) {
2856 pci_lock_rescan_remove();
2857 pci_stop_and_remove_bus_device(pdev);
2858 pci_dev_put(pdev);
2859 pci_unlock_rescan_remove();
2860 }
2861
2862 spin_lock_irqsave(&hbus->device_list_lock, flags);
2863 list_del(&hpdev->list_entry);
2864 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
2865
2866 if (hpdev->pci_slot)
2867 pci_destroy_slot(hpdev->pci_slot);
2868
2869 memset(&ctxt, 0, sizeof(ctxt));
2870 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
2871 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
2872 ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot;
2873 vmbus_sendpacket(hbus->hdev->channel, ejct_pkt,
2874 sizeof(*ejct_pkt), 0,
2875 VM_PKT_DATA_INBAND, 0);
2876
2877 /* For the get_pcichild() in hv_pci_eject_device() */
2878 put_pcichild(hpdev);
2879 /* For the two refs taken in new_pcichild_device() */
2880 put_pcichild(hpdev);
2881 put_pcichild(hpdev);
2882 /* hpdev has been freed. Do not use it any more. */
2883
2884 mutex_unlock(&hbus->state_lock);
2885 }
2886
2887 /**
2888 * hv_pci_eject_device() - Handles device ejection
2889 * @hpdev: Internal device tracking struct
2890 *
2891 * This function is invoked when an ejection packet arrives. It
2892 * just schedules work so that we don't re-enter the packet
2893 * delivery code handling the ejection.
2894 */
2895 static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
2896 {
2897 struct hv_pcibus_device *hbus = hpdev->hbus;
2898 struct hv_device *hdev = hbus->hdev;
2899
2900 if (hbus->state == hv_pcibus_removing) {
2901 dev_info(&hdev->device, "PCI VMBus EJECT: ignored\n");
2902 return;
2903 }
2904
2905 get_pcichild(hpdev);
2906 INIT_WORK(&hpdev->wrk, hv_eject_device_work);
2907 queue_work(hbus->wq, &hpdev->wrk);
2908 }
2909
2910 /**
2911 * hv_pci_onchannelcallback() - Handles incoming packets
2912 * @context: Internal bus tracking struct
2913 *
2914 * This function is invoked whenever the host sends a packet to
2915 * this channel (which is private to this root PCI bus).
2916 */
2917 static void hv_pci_onchannelcallback(void *context)
2918 {
2919 const int packet_size = 0x100;
2920 int ret;
2921 struct hv_pcibus_device *hbus = context;
2922 struct vmbus_channel *chan = hbus->hdev->channel;
2923 u32 bytes_recvd;
2924 u64 req_id, req_addr;
2925 struct vmpacket_descriptor *desc;
2926 unsigned char *buffer;
2927 int bufferlen = packet_size;
2928 struct pci_packet *comp_packet;
2929 struct pci_response *response;
2930 struct pci_incoming_message *new_message;
2931 struct pci_bus_relations *bus_rel;
2932 struct pci_bus_relations2 *bus_rel2;
2933 struct pci_dev_inval_block *inval;
2934 struct pci_dev_incoming *dev_message;
2935 struct hv_pci_dev *hpdev;
2936 unsigned long flags;
2937
2938 buffer = kmalloc(bufferlen, GFP_ATOMIC);
2939 if (!buffer)
2940 return;
2941
2942 while (1) {
2943 ret = vmbus_recvpacket_raw(chan, buffer, bufferlen,
2944 &bytes_recvd, &req_id);
2945
2946 if (ret == -ENOBUFS) {
2947 kfree(buffer);
2948 /* Handle large packet */
2949 bufferlen = bytes_recvd;
2950 buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
2951 if (!buffer)
2952 return;
2953 continue;
2954 }
2955
2956 /* Zero length indicates there are no more packets. */
2957 if (ret || !bytes_recvd)
2958 break;
2959
2960 /*
2961 * All incoming packets must be at least as large as a
2962 * response.
2963 */
2964 if (bytes_recvd <= sizeof(struct pci_response))
2965 continue;
2966 desc = (struct vmpacket_descriptor *)buffer;
2967
2968 switch (desc->type) {
2969 case VM_PKT_COMP:
2970
2971 lock_requestor(chan, flags);
2972 req_addr = __vmbus_request_addr_match(chan, req_id,
2973 VMBUS_RQST_ADDR_ANY);
2974 if (req_addr == VMBUS_RQST_ERROR) {
2975 unlock_requestor(chan, flags);
2976 dev_err(&hbus->hdev->device,
2977 "Invalid transaction ID %llx\n",
2978 req_id);
2979 break;
2980 }
2981 comp_packet = (struct pci_packet *)req_addr;
2982 response = (struct pci_response *)buffer;
2983 /*
2984 * Call ->completion_func() within the critical section to make
2985 * sure that the packet pointer is still valid during the call:
2986 * here 'valid' means that there's a task still waiting for the
2987 * completion, and that the packet data is still on the waiting
2988 * task's stack. Cf. hv_compose_msi_msg().
2989 */
2990 comp_packet->completion_func(comp_packet->compl_ctxt,
2991 response,
2992 bytes_recvd);
2993 unlock_requestor(chan, flags);
2994 break;
2995
2996 case VM_PKT_DATA_INBAND:
2997
2998 new_message = (struct pci_incoming_message *)buffer;
2999 switch (new_message->message_type.type) {
3000 case PCI_BUS_RELATIONS:
3001
3002 bus_rel = (struct pci_bus_relations *)buffer;
3003 if (bytes_recvd < sizeof(*bus_rel) ||
3004 bytes_recvd <
3005 struct_size(bus_rel, func,
3006 bus_rel->device_count)) {
3007 dev_err(&hbus->hdev->device,
3008 "bus relations too small\n");
3009 break;
3010 }
3011
3012 hv_pci_devices_present(hbus, bus_rel);
3013 break;
3014
3015 case PCI_BUS_RELATIONS2:
3016
3017 bus_rel2 = (struct pci_bus_relations2 *)buffer;
3018 if (bytes_recvd < sizeof(*bus_rel2) ||
3019 bytes_recvd <
3020 struct_size(bus_rel2, func,
3021 bus_rel2->device_count)) {
3022 dev_err(&hbus->hdev->device,
3023 "bus relations v2 too small\n");
3024 break;
3025 }
3026
3027 hv_pci_devices_present2(hbus, bus_rel2);
3028 break;
3029
3030 case PCI_EJECT:
3031
3032 dev_message = (struct pci_dev_incoming *)buffer;
3033 if (bytes_recvd < sizeof(*dev_message)) {
3034 dev_err(&hbus->hdev->device,
3035 "eject message too small\n");
3036 break;
3037 }
3038 hpdev = get_pcichild_wslot(hbus,
3039 dev_message->wslot.slot);
3040 if (hpdev) {
3041 hv_pci_eject_device(hpdev);
3042 put_pcichild(hpdev);
3043 }
3044 break;
3045
3046 case PCI_INVALIDATE_BLOCK:
3047
3048 inval = (struct pci_dev_inval_block *)buffer;
3049 if (bytes_recvd < sizeof(*inval)) {
3050 dev_err(&hbus->hdev->device,
3051 "invalidate message too small\n");
3052 break;
3053 }
3054 hpdev = get_pcichild_wslot(hbus,
3055 inval->wslot.slot);
3056 if (hpdev) {
3057 if (hpdev->block_invalidate) {
3058 hpdev->block_invalidate(
3059 hpdev->invalidate_context,
3060 inval->block_mask);
3061 }
3062 put_pcichild(hpdev);
3063 }
3064 break;
3065
3066 default:
3067 dev_warn(&hbus->hdev->device,
3068 "Unimplemented protocol message %x\n",
3069 new_message->message_type.type);
3070 break;
3071 }
3072 break;
3073
3074 default:
3075 dev_err(&hbus->hdev->device,
3076 "unhandled packet type %d, tid %llx len %d\n",
3077 desc->type, req_id, bytes_recvd);
3078 break;
3079 }
3080 }
3081
3082 kfree(buffer);
3083 }
3084
3085 /**
3086 * hv_pci_protocol_negotiation() - Set up protocol
3087 * @hdev: VMBus's tracking struct for this root PCI bus.
3088 * @version: Array of supported channel protocol versions in
3089 * the order of probing - highest goes first.
3090 * @num_version: Number of elements in the version array.
3091 *
3092 * This driver is intended to support running on Windows 10
3093 * (server) and later versions. It will not run on earlier
3094 * versions, as they assume that many of the operations which
3095 * Linux needs accomplished with a spinlock held were done via
3096 * asynchronous messaging via VMBus. Windows 10 increases the
3097 * surface area of PCI emulation so that these actions can take
3098 * place by suspending a virtual processor for their duration.
3099 *
3100 * This function negotiates the channel protocol version,
3101 * failing if the host doesn't support the necessary protocol
3102 * level.
3103 */
3104 static int hv_pci_protocol_negotiation(struct hv_device *hdev,
3105 enum pci_protocol_version_t version[],
3106 int num_version)
3107 {
3108 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3109 struct pci_version_request *version_req;
3110 struct hv_pci_compl comp_pkt;
3111 struct pci_packet *pkt;
3112 int ret;
3113 int i;
3114
3115 /*
3116 * Initiate the handshake with the host and negotiate
3117 * a version that the host can support. We start with the
3118 * highest version number and go down if the host cannot
3119 * support it.
3120 */
3121 pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL);
3122 if (!pkt)
3123 return -ENOMEM;
3124
3125 init_completion(&comp_pkt.host_event);
3126 pkt->completion_func = hv_pci_generic_compl;
3127 pkt->compl_ctxt = &comp_pkt;
3128 version_req = (struct pci_version_request *)&pkt->message;
3129 version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION;
3130
3131 for (i = 0; i < num_version; i++) {
3132 version_req->protocol_version = version[i];
3133 ret = vmbus_sendpacket(hdev->channel, version_req,
3134 sizeof(struct pci_version_request),
3135 (unsigned long)pkt, VM_PKT_DATA_INBAND,
3136 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3137 if (!ret)
3138 ret = wait_for_response(hdev, &comp_pkt.host_event);
3139
3140 if (ret) {
3141 dev_err(&hdev->device,
3142 "PCI Pass-through VSP failed to request version: %d",
3143 ret);
3144 goto exit;
3145 }
3146
3147 if (comp_pkt.completion_status >= 0) {
3148 hbus->protocol_version = version[i];
3149 dev_info(&hdev->device,
3150 "PCI VMBus probing: Using version %#x\n",
3151 hbus->protocol_version);
3152 goto exit;
3153 }
3154
3155 if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) {
3156 dev_err(&hdev->device,
3157 "PCI Pass-through VSP failed version request: %#x",
3158 comp_pkt.completion_status);
3159 ret = -EPROTO;
3160 goto exit;
3161 }
3162
3163 reinit_completion(&comp_pkt.host_event);
3164 }
3165
3166 dev_err(&hdev->device,
3167 "PCI pass-through VSP failed to find supported version");
3168 ret = -EPROTO;
3169
3170 exit:
3171 kfree(pkt);
3172 return ret;
3173 }
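/*
 * Example of the probing order with hypothetical hosts: a WS2022 host
 * accepts PCI_PROTOCOL_VERSION_1_4 on the first request, while an
 * original Win10 host answers 1.4, 1.3 and 1.2 with
 * STATUS_REVISION_MISMATCH before accepting 1.1; any other failure
 * status aborts the negotiation with -EPROTO.
 */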
3174
3175 /**
3176 * hv_pci_free_bridge_windows() - Release memory regions for the
3177 * bus
3178 * @hbus: Root PCI bus, as understood by this driver
3179 */
3180 static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus)
3181 {
3182 /*
3183 * Set the resources back to the way they looked when they
3184 * were allocated by setting IORESOURCE_BUSY again.
3185 */
3186
3187 if (hbus->low_mmio_space && hbus->low_mmio_res) {
3188 hbus->low_mmio_res->flags |= IORESOURCE_BUSY;
3189 vmbus_free_mmio(hbus->low_mmio_res->start,
3190 resource_size(hbus->low_mmio_res));
3191 }
3192
3193 if (hbus->high_mmio_space && hbus->high_mmio_res) {
3194 hbus->high_mmio_res->flags |= IORESOURCE_BUSY;
3195 vmbus_free_mmio(hbus->high_mmio_res->start,
3196 resource_size(hbus->high_mmio_res));
3197 }
3198 }
3199
3200 /**
3201 * hv_pci_allocate_bridge_windows() - Allocate memory regions
3202 * for the bus
3203 * @hbus: Root PCI bus, as understood by this driver
3204 *
3205 * This function calls vmbus_allocate_mmio(), which is itself a
3206 * bit of a compromise. Ideally, we might change the pnp layer
3207 * in the kernel such that it comprehends either PCI devices
3208 * which are "grandchildren of ACPI," with some intermediate bus
3209 * node (in this case, VMBus) or change it such that it
3210 * understands VMBus. The pnp layer, however, has been declared
3211 * deprecated, and not subject to change.
3212 *
3213 * The workaround, implemented here, is to ask VMBus to allocate
3214 * MMIO space for this bus. VMBus itself knows which ranges are
3215 * appropriate by looking at its own ACPI objects. Then, after
3216 * these ranges are claimed, they're modified to look like they
3217 * would have looked if the ACPI and pnp code had allocated
3218 * bridge windows. These descriptors have to exist in this form
3219 * in order to satisfy the code which will get invoked when the
3220 * endpoint PCI function driver calls request_mem_region() or
3221 * request_mem_region_exclusive().
3222 *
3223 * Return: 0 on success, -errno on failure
3224 */
3225 static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus)
3226 {
3227 resource_size_t align;
3228 int ret;
3229
3230 if (hbus->low_mmio_space) {
3231 align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space));
3232 ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0,
3233 (u64)(u32)0xffffffff,
3234 hbus->low_mmio_space,
3235 align, false);
3236 if (ret) {
3237 dev_err(&hbus->hdev->device,
3238 "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n",
3239 hbus->low_mmio_space);
3240 return ret;
3241 }
3242
3243 /* Modify this resource to become a bridge window. */
3244 hbus->low_mmio_res->flags |= IORESOURCE_WINDOW;
3245 hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY;
3246 pci_add_resource(&hbus->bridge->windows, hbus->low_mmio_res);
3247 }
3248
3249 if (hbus->high_mmio_space) {
3250 align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space));
3251 ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev,
3252 0x100000000, -1,
3253 hbus->high_mmio_space, align,
3254 false);
3255 if (ret) {
3256 dev_err(&hbus->hdev->device,
3257 "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n",
3258 hbus->high_mmio_space);
3259 goto release_low_mmio;
3260 }
3261
3262 /* Modify this resource to become a bridge window. */
3263 hbus->high_mmio_res->flags |= IORESOURCE_WINDOW;
3264 hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY;
3265 pci_add_resource(&hbus->bridge->windows, hbus->high_mmio_res);
3266 }
3267
3268 return 0;
3269
3270 release_low_mmio:
3271 if (hbus->low_mmio_res) {
3272 vmbus_free_mmio(hbus->low_mmio_res->start,
3273 resource_size(hbus->low_mmio_res));
3274 }
3275
3276 return ret;
3277 }
3278
3279 /**
3280 * hv_allocate_config_window() - Find MMIO space for PCI Config
3281 * @hbus: Root PCI bus, as understood by this driver
3282 *
3283 * This function claims memory-mapped I/O space for accessing
3284 * configuration space for the functions on this bus.
3285 *
3286 * Return: 0 on success, -errno on failure
3287 */
3288 static int hv_allocate_config_window(struct hv_pcibus_device *hbus)
3289 {
3290 int ret;
3291
3292 /*
3293 * Set up a region of MMIO space to use for accessing configuration
3294 * space.
3295 */
3296 ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1,
3297 PCI_CONFIG_MMIO_LENGTH, 0x1000, false);
3298 if (ret)
3299 return ret;
3300
3301 /*
3302 * vmbus_allocate_mmio() gets used for allocating both device endpoint
3303 * resource claims (those which cannot be overlapped) and the ranges
3304 * which are valid for the children of this bus, which are intended
3305 * to be overlapped by those children. Set the flag on this claim
3306 * meaning that this region can't be overlapped.
3307 */
3308
3309 hbus->mem_config->flags |= IORESOURCE_BUSY;
3310
3311 return 0;
3312 }
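/*
 * Layout of the claimed region, as consumed by the config-space
 * accessors in this file: the first page (below CFG_PAGE_OFFSET) holds
 * the register that selects which function's config space is currently
 * mapped, and the page at CFG_PAGE_OFFSET (CFG_PAGE_SIZE bytes) is the
 * window onto that function's configuration space.
 */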
3313
3314 static void hv_free_config_window(struct hv_pcibus_device *hbus)
3315 {
3316 vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH);
3317 }
3318
3319 static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs);
3320
3321 /**
3322 * hv_pci_enter_d0() - Bring the "bus" into the D0 power state
3323 * @hdev: VMBus's tracking struct for this root PCI bus
3324 *
3325 * Return: 0 on success, -errno on failure
3326 */
3327 static int hv_pci_enter_d0(struct hv_device *hdev)
3328 {
3329 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3330 struct pci_bus_d0_entry *d0_entry;
3331 struct hv_pci_compl comp_pkt;
3332 struct pci_packet *pkt;
3333 bool retry = true;
3334 int ret;
3335
3336 enter_d0_retry:
3337 /*
3338 * Tell the host that the bus is ready to use, and moved into the
3339 * powered-on state. This includes telling the host which region
3340 * of memory-mapped I/O space has been chosen for configuration space
3341 * access.
3342 */
3343 pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL);
3344 if (!pkt)
3345 return -ENOMEM;
3346
3347 init_completion(&comp_pkt.host_event);
3348 pkt->completion_func = hv_pci_generic_compl;
3349 pkt->compl_ctxt = &comp_pkt;
3350 d0_entry = (struct pci_bus_d0_entry *)&pkt->message;
3351 d0_entry->message_type.type = PCI_BUS_D0ENTRY;
3352 d0_entry->mmio_base = hbus->mem_config->start;
3353
3354 ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
3355 (unsigned long)pkt, VM_PKT_DATA_INBAND,
3356 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
3357 if (!ret)
3358 ret = wait_for_response(hdev, &comp_pkt.host_event);
3359
3360 if (ret)
3361 goto exit;
3362
3363 /*
3364 * In certain cases (e.g. kdump) the PCI device of interest was
3365 * not cleanly shut down and its resources are still held on the
3366 * host side, so the host can return an invalid device status.
3367 * We need to explicitly request that the host release the
3368 * resources and try to enter D0 again.
3369 */
3370 if (comp_pkt.completion_status < 0 && retry) {
3371 retry = false;
3372
3373 dev_err(&hdev->device, "Retrying D0 Entry\n");
3374
3375 /*
3376 * hv_pci_bus_exit() calls hv_send_resources_released()
3377 * to free up resources of its child devices.
3378 * In the kdump kernel we need to set the
3379 * wslot_res_allocated to 255 so it scans all child
3380 * devices to release resources allocated in the
3381 * normal kernel before panic happened.
3382 */
3383 hbus->wslot_res_allocated = 255;
3384
3385 ret = hv_pci_bus_exit(hdev, true);
3386
3387 if (ret == 0) {
3388 kfree(pkt);
3389 goto enter_d0_retry;
3390 }
3391 dev_err(&hdev->device,
3392 "Retrying D0 failed with ret %d\n", ret);
3393 }
3394
3395 if (comp_pkt.completion_status < 0) {
3396 dev_err(&hdev->device,
3397 "PCI Pass-through VSP failed D0 Entry with status %x\n",
3398 comp_pkt.completion_status);
3399 ret = -EPROTO;
3400 goto exit;
3401 }
3402
3403 ret = 0;
3404
3405 exit:
3406 kfree(pkt);
3407 return ret;
3408 }
3409
3410 /**
3411 * hv_pci_query_relations() - Ask host to send list of child
3412 * devices
3413 * @hdev: VMBus's tracking struct for this root PCI bus
3414 *
3415 * Return: 0 on success, -errno on failure
3416 */
3417 static int hv_pci_query_relations(struct hv_device *hdev)
3418 {
3419 struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
3420 struct pci_message message;
3421 struct completion comp;
3422 int ret;
3423
3424 /* Ask the host to send along the list of child devices */
3425 init_completion(&comp);
3426 if (cmpxchg(&hbus->survey_event, NULL, &comp))
3427 return -ENOTEMPTY;
3428
3429 memset(&message, 0, sizeof(message));
3430 message.type = PCI_QUERY_BUS_RELATIONS;
3431
3432 ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
3433 0, VM_PKT_DATA_INBAND, 0);
3434 if (!ret)
3435 ret = wait_for_response(hdev, &comp);
3436
3437 /*
3438 * In the case of fast device addition/removal, it's possible that
3439 * vmbus_sendpacket() or wait_for_response() returns -ENODEV but we
3440 * already got a PCI_BUS_RELATIONS* message from the host and the
3441 * channel callback already scheduled a work to hbus->wq, which can be
3442 * running pci_devices_present_work() -> survey_child_resources() ->
3443 * complete(&hbus->survey_event), even after hv_pci_query_relations()
3444 * exits and the stack variable 'comp' is no longer valid; as a result,
3445 * a hang or a page fault may happen when the complete() calls
3446 * raw_spin_lock_irqsave(). Flush hbus->wq before we exit from
3447 * hv_pci_query_relations() to avoid the issues. Note: if 'ret' is
3448 * -ENODEV, there can't be any more work item scheduled to hbus->wq
3449 * after the flush_workqueue(): see vmbus_onoffer_rescind() ->
3450 * vmbus_reset_channel_cb(), vmbus_rescind_cleanup() ->
3451 * channel->rescind = true.
3452 */
3453 flush_workqueue(hbus->wq);
3454
3455 return ret;
3456 }
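
/*
 * Illustrative sketch (not part of the driver): the cmpxchg() guard in
 * hv_pci_query_relations() admits only one outstanding survey at a time.
 * Whoever installs their completion pointer into the (initially NULL)
 * slot owns the survey; a second caller backs off. The helper name
 * "example_claim_slot" is hypothetical, shown only to isolate the pattern.
 */
static int __maybe_unused example_claim_slot(struct completion **slot,
					     struct completion *comp)
{
	/* Succeeds only if *slot was NULL; otherwise a survey is pending. */
	if (cmpxchg(slot, NULL, comp))
		return -ENOTEMPTY;
	return 0;
}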

/**
 * hv_send_resources_allocated() - Report local resource choices
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * The host OS is expecting to be sent a request as a message
 * which contains all the resources that the device will use.
 * The response contains those same resources, "translated",
 * which is to say, the values that the hardware should use when
 * it delivers an interrupt. (MMIO resources are used in local
 * terms.) This is nice for Windows, and lines up with the
 * FDO/PDO split, which doesn't exist in Linux. Linux is deeply
 * expecting to scan an emulated PCI configuration space. So this
 * message is sent here only to drive the state machine on the
 * host forward.
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_allocated(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_resources_assigned *res_assigned;
	struct pci_resources_assigned2 *res_assigned2;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev;
	struct pci_packet *pkt;
	size_t size_res;
	int wslot;
	int ret;

	size_res = (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2)
			? sizeof(*res_assigned) : sizeof(*res_assigned2);

	pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	ret = 0;

	for (wslot = 0; wslot < 256; wslot++) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(pkt, 0, sizeof(*pkt) + size_res);
		init_completion(&comp_pkt.host_event);
		pkt->completion_func = hv_pci_generic_compl;
		pkt->compl_ctxt = &comp_pkt;

		if (hbus->protocol_version < PCI_PROTOCOL_VERSION_1_2) {
			res_assigned =
				(struct pci_resources_assigned *)&pkt->message;
			res_assigned->message_type.type =
				PCI_RESOURCES_ASSIGNED;
			res_assigned->wslot.slot = hpdev->desc.win_slot.slot;
		} else {
			res_assigned2 =
				(struct pci_resources_assigned2 *)&pkt->message;
			res_assigned2->message_type.type =
				PCI_RESOURCES_ASSIGNED2;
			res_assigned2->wslot.slot = hpdev->desc.win_slot.slot;
		}
		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt->message,
				size_res, (unsigned long)pkt,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
		if (!ret)
			ret = wait_for_response(hdev, &comp_pkt.host_event);
		if (ret)
			break;

		if (comp_pkt.completion_status < 0) {
			ret = -EPROTO;
			dev_err(&hdev->device,
				"resource allocated returned 0x%x",
				comp_pkt.completion_status);
			break;
		}

		hbus->wslot_res_allocated = wslot;
	}

	kfree(pkt);
	return ret;
}

/**
 * hv_send_resources_released() - Report local resources released
 * @hdev: VMBus's tracking struct for this root PCI bus
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_send_resources_released(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct pci_child_message pkt;
	struct hv_pci_dev *hpdev;
	int wslot;
	int ret;

	for (wslot = hbus->wslot_res_allocated; wslot >= 0; wslot--) {
		hpdev = get_pcichild_wslot(hbus, wslot);
		if (!hpdev)
			continue;

		memset(&pkt, 0, sizeof(pkt));
		pkt.message_type.type = PCI_RESOURCES_RELEASED;
		pkt.wslot.slot = hpdev->desc.win_slot.slot;

		put_pcichild(hpdev);

		ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0,
				       VM_PKT_DATA_INBAND, 0);
		if (ret)
			return ret;

		hbus->wslot_res_allocated = wslot - 1;
	}

	hbus->wslot_res_allocated = -1;

	return 0;
}

#define HVPCI_DOM_MAP_SIZE (64 * 1024)
static DECLARE_BITMAP(hvpci_dom_map, HVPCI_DOM_MAP_SIZE);

/*
 * PCI domain number 0 is used by emulated devices on Gen1 VMs, so define 0
 * as invalid for passthrough PCI devices of this driver.
 */
#define HVPCI_DOM_INVALID 0

/**
 * hv_get_dom_num() - Get a valid PCI domain number
 * @dom: Requested domain number
 *
 * Check if the PCI domain number is in use, and return another number if
 * it is in use.
 *
 * Return: domain number on success, HVPCI_DOM_INVALID on failure
 */
static u16 hv_get_dom_num(u16 dom)
{
	unsigned int i;

	if (test_and_set_bit(dom, hvpci_dom_map) == 0)
		return dom;

	for_each_clear_bit(i, hvpci_dom_map, HVPCI_DOM_MAP_SIZE) {
		if (test_and_set_bit(i, hvpci_dom_map) == 0)
			return i;
	}

	return HVPCI_DOM_INVALID;
}

/**
 * hv_put_dom_num() - Mark the PCI domain number as free
 * @dom: Domain number to be freed
 */
static void hv_put_dom_num(u16 dom)
{
	clear_bit(dom, hvpci_dom_map);
}
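
/*
 * Illustrative sketch (not part of the driver): the intended pairing of
 * the two helpers above. A caller asks for a preferred domain number and
 * must fall back gracefully on collision; every successful
 * hv_get_dom_num() is balanced by hv_put_dom_num(). The helper name
 * "example_dom_lifetime" is hypothetical; the real call sites are
 * hv_pci_probe() and hv_pci_remove() below.
 */
static int __maybe_unused example_dom_lifetime(u16 preferred)
{
	u16 dom = hv_get_dom_num(preferred);

	if (dom == HVPCI_DOM_INVALID)
		return -EINVAL;	/* all 64K domain numbers are taken */

	/* ... the domain is exclusively ours while the bus exists ... */

	hv_put_dom_num(dom);	/* release it for reuse */
	return 0;
}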

/**
 * hv_pci_probe() - New VMBus channel probe, for a root PCI bus
 * @hdev: VMBus's tracking struct for this root PCI bus
 * @dev_id: Identifies the device itself
 *
 * Return: 0 on success, -errno on failure
 */
static int hv_pci_probe(struct hv_device *hdev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct pci_host_bridge *bridge;
	struct hv_pcibus_device *hbus;
	u16 dom_req, dom;
	char *name;
	int ret;

	bridge = devm_pci_alloc_host_bridge(&hdev->device, 0);
	if (!bridge)
		return -ENOMEM;

	hbus = kzalloc(sizeof(*hbus), GFP_KERNEL);
	if (!hbus)
		return -ENOMEM;

	hbus->bridge = bridge;
	mutex_init(&hbus->state_lock);
	hbus->state = hv_pcibus_init;
	hbus->wslot_res_allocated = -1;

	/*
	 * The PCI bus "domain" is what is called "segment" in ACPI and other
	 * specs. Pull it from the instance ID, to get something usually
	 * unique. In rare cases of collision, we will find out another number
	 * not in use.
	 *
	 * Note that, since this code only runs in a Hyper-V VM, Hyper-V
	 * together with this guest driver can guarantee that (1) The only
	 * domain used by Gen1 VMs for something that looks like a physical
	 * PCI bus (which is actually emulated by the hypervisor) is domain 0.
	 * (2) There will be no overlap between domains (after fixing possible
	 * collisions) in the same VM.
	 */
	dom_req = hdev->dev_instance.b[5] << 8 | hdev->dev_instance.b[4];
	dom = hv_get_dom_num(dom_req);

	if (dom == HVPCI_DOM_INVALID) {
		dev_err(&hdev->device,
			"Unable to use dom# 0x%x or other numbers", dom_req);
		ret = -EINVAL;
		goto free_bus;
	}

	if (dom != dom_req)
		dev_info(&hdev->device,
			 "PCI dom# 0x%x has collision, using 0x%x",
			 dom_req, dom);

	hbus->bridge->domain_nr = dom;
#ifdef CONFIG_X86
	hbus->sysdata.domain = dom;
	hbus->use_calls = !!(ms_hyperv.hints & HV_X64_USE_MMIO_HYPERCALLS);
#elif defined(CONFIG_ARM64)
	/*
	 * Set the PCI bus parent to be the corresponding VMbus
	 * device. Then the VMbus device will be assigned as the
	 * ACPI companion in pcibios_root_bridge_prepare() and
	 * pci_dma_configure() will propagate device coherence
	 * information to devices created on the bus.
	 */
	hbus->sysdata.parent = hdev->device.parent;
	hbus->use_calls = false;
#endif

	hbus->hdev = hdev;
	INIT_LIST_HEAD(&hbus->children);
	INIT_LIST_HEAD(&hbus->dr_list);
	spin_lock_init(&hbus->config_lock);
	spin_lock_init(&hbus->device_list_lock);
	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
					   hbus->bridge->domain_nr);
	if (!hbus->wq) {
		ret = -ENOMEM;
		goto free_dom;
	}

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		goto destroy_wq;

	hv_set_drvdata(hdev, hbus);

	ret = hv_pci_protocol_negotiation(hdev, pci_protocol_versions,
					  ARRAY_SIZE(pci_protocol_versions));
	if (ret)
		goto close;

	ret = hv_allocate_config_window(hbus);
	if (ret)
		goto close;

	hbus->cfg_addr = ioremap(hbus->mem_config->start,
				 PCI_CONFIG_MMIO_LENGTH);
	if (!hbus->cfg_addr) {
		dev_err(&hdev->device,
			"Unable to map a virtual address for config space\n");
		ret = -ENOMEM;
		goto free_config;
	}

	name = kasprintf(GFP_KERNEL, "%pUL", &hdev->dev_instance);
	if (!name) {
		ret = -ENOMEM;
		goto unmap;
	}

	hbus->fwnode = irq_domain_alloc_named_fwnode(name);
	kfree(name);
	if (!hbus->fwnode) {
		ret = -ENOMEM;
		goto unmap;
	}

	ret = hv_pcie_init_irq_domain(hbus);
	if (ret)
		goto free_fwnode;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto free_irq_domain;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_pci_allocate_bridge_windows(hbus);
	if (ret)
		goto exit_d0;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto free_windows;

	prepopulate_bars(hbus);

	hbus->state = hv_pcibus_probed;

	ret = create_root_hv_pci_bus(hbus);
	if (ret)
		goto free_windows;

	mutex_unlock(&hbus->state_lock);
	return 0;

free_windows:
	hv_pci_free_bridge_windows(hbus);
exit_d0:
	(void) hv_pci_bus_exit(hdev, true);
release_state_lock:
	mutex_unlock(&hbus->state_lock);
free_irq_domain:
	irq_domain_remove(hbus->irq_domain);
free_fwnode:
	irq_domain_free_fwnode(hbus->fwnode);
unmap:
	iounmap(hbus->cfg_addr);
free_config:
	hv_free_config_window(hbus);
close:
	vmbus_close(hdev->channel);
destroy_wq:
	destroy_workqueue(hbus->wq);
free_dom:
	hv_put_dom_num(hbus->bridge->domain_nr);
free_bus:
	kfree(hbus);
	return ret;
}

static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	struct vmbus_channel *chan = hdev->channel;
	struct {
		struct pci_packet teardown_packet;
		u8 buffer[sizeof(struct pci_message)];
	} pkt;
	struct hv_pci_compl comp_pkt;
	struct hv_pci_dev *hpdev, *tmp;
	unsigned long flags;
	u64 trans_id;
	int ret;

	/*
	 * After the host sends the RESCIND_CHANNEL message, it doesn't
	 * access the per-channel ringbuffer any longer.
	 */
	if (chan->rescind)
		return 0;

	if (!keep_devs) {
		struct list_head removed;

		/* Move all present children to the list on stack */
		INIT_LIST_HEAD(&removed);
		spin_lock_irqsave(&hbus->device_list_lock, flags);
		list_for_each_entry_safe(hpdev, tmp, &hbus->children, list_entry)
			list_move_tail(&hpdev->list_entry, &removed);
		spin_unlock_irqrestore(&hbus->device_list_lock, flags);

		/* Remove all children in the list */
		list_for_each_entry_safe(hpdev, tmp, &removed, list_entry) {
			list_del(&hpdev->list_entry);
			if (hpdev->pci_slot)
				pci_destroy_slot(hpdev->pci_slot);
			/* For the two refs got in new_pcichild_device() */
			put_pcichild(hpdev);
			put_pcichild(hpdev);
		}
	}

	ret = hv_send_resources_released(hdev);
	if (ret) {
		dev_err(&hdev->device,
			"Couldn't send resources released packet(s)\n");
		return ret;
	}

	memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet));
	init_completion(&comp_pkt.host_event);
	pkt.teardown_packet.completion_func = hv_pci_generic_compl;
	pkt.teardown_packet.compl_ctxt = &comp_pkt;
	pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT;

	ret = vmbus_sendpacket_getid(chan, &pkt.teardown_packet.message,
				     sizeof(struct pci_message),
				     (unsigned long)&pkt.teardown_packet,
				     &trans_id, VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ) == 0) {
		/*
		 * The completion packet on the stack becomes invalid after
		 * 'return'; remove the ID from the VMbus requestor if the
		 * identifier is still mapped to/associated with the packet.
		 *
		 * Cf. hv_pci_onchannelcallback().
		 */
		vmbus_request_addr_match(chan, trans_id,
					 (unsigned long)&pkt.teardown_packet);
		return -ETIMEDOUT;
	}

	return 0;
}
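
/*
 * Illustrative sketch (not part of the driver): the stack-lifetime rule
 * the timeout path above enforces. When the packet that the host will
 * complete lives on the stack and the wait times out, the transaction ID
 * must be unmapped from the VMbus requestor before the frame is torn
 * down, or a late reply from the host would dereference a dangling
 * pointer. The helper name "example_send_from_stack" is hypothetical.
 */
static int __maybe_unused example_send_from_stack(struct vmbus_channel *chan,
						  struct pci_packet *pkt,
						  u32 msglen,
						  struct completion *event)
{
	u64 trans_id;
	int ret;

	ret = vmbus_sendpacket_getid(chan, &pkt->message, msglen,
				     (unsigned long)pkt, &trans_id,
				     VM_PKT_DATA_INBAND,
				     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret)
		return ret;

	if (wait_for_completion_timeout(event, 10 * HZ) == 0) {
		/* Unmap trans_id only if it still refers to this packet. */
		vmbus_request_addr_match(chan, trans_id, (unsigned long)pkt);
		return -ETIMEDOUT;
	}

	return 0;
}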

/**
 * hv_pci_remove() - Remove routine for this VMBus channel
 * @hdev: VMBus's tracking struct for this root PCI bus
 */
static void hv_pci_remove(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus;

	hbus = hv_get_drvdata(hdev);
	if (hbus->state == hv_pcibus_installed) {
		tasklet_disable(&hdev->channel->callback_event);
		hbus->state = hv_pcibus_removing;
		tasklet_enable(&hdev->channel->callback_event);
		destroy_workqueue(hbus->wq);
		hbus->wq = NULL;
		/*
		 * At this point, no work is running or can be scheduled
		 * on hbus->wq. Since we can't race with
		 * hv_pci_devices_present() or hv_pci_eject_device(), it's
		 * safe to proceed.
		 */

		/* Remove the bus from PCI's point of view. */
		pci_lock_rescan_remove();
		pci_stop_root_bus(hbus->bridge->bus);
		hv_pci_remove_slots(hbus);
		pci_remove_root_bus(hbus->bridge->bus);
		pci_unlock_rescan_remove();
	}

	hv_pci_bus_exit(hdev, false);

	vmbus_close(hdev->channel);

	iounmap(hbus->cfg_addr);
	hv_free_config_window(hbus);
	hv_pci_free_bridge_windows(hbus);
	irq_domain_remove(hbus->irq_domain);
	irq_domain_free_fwnode(hbus->fwnode);

	hv_put_dom_num(hbus->bridge->domain_nr);

	kfree(hbus);
}

static int hv_pci_suspend(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum hv_pcibus_state old_state;
	int ret;

	/*
	 * hv_pci_suspend() must make sure there are no pending work items
	 * before calling vmbus_close(), since it runs in a process context
	 * as a callback in dpm_suspend(). When it starts to run, the channel
	 * callback hv_pci_onchannelcallback(), which runs in a tasklet
	 * context, can still be running concurrently and scheduling new work
	 * items onto hbus->wq in hv_pci_devices_present() and
	 * hv_pci_eject_device(), and the work item handlers can access the
	 * vmbus channel, which hv_pci_suspend() may be closing at the same
	 * time, e.g. the work item handler pci_devices_present_work() ->
	 * new_pcichild_device() writes to the vmbus channel.
	 *
	 * To eliminate the race, hv_pci_suspend() disables the channel
	 * callback tasklet, sets hbus->state to hv_pcibus_removing, and
	 * re-enables the tasklet. This way, when hv_pci_suspend() proceeds,
	 * it knows that no new work item can be scheduled, and then it
	 * flushes hbus->wq and safely closes the vmbus channel.
	 */
	tasklet_disable(&hdev->channel->callback_event);

	/* Change the hbus state to prevent new work items. */
	old_state = hbus->state;
	if (hbus->state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;

	tasklet_enable(&hdev->channel->callback_event);

	if (old_state != hv_pcibus_installed)
		return -EINVAL;

	flush_workqueue(hbus->wq);

	ret = hv_pci_bus_exit(hdev, true);
	if (ret)
		return ret;

	vmbus_close(hdev->channel);

	return 0;
}
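
/*
 * Illustrative sketch (not part of the driver): the quiescing pattern
 * shared by hv_pci_suspend() and hv_pci_remove(). The channel callback
 * runs in tasklet context, so disabling the tasklet opens a window in
 * which hbus->state can be flipped without racing against new work-item
 * scheduling. The helper name "example_quiesce" is hypothetical.
 */
static bool __maybe_unused example_quiesce(struct hv_device *hdev,
					   struct hv_pcibus_device *hbus)
{
	enum hv_pcibus_state old_state;

	tasklet_disable(&hdev->channel->callback_event);
	old_state = hbus->state;
	if (old_state == hv_pcibus_installed)
		hbus->state = hv_pcibus_removing;	/* no new work items */
	tasklet_enable(&hdev->channel->callback_event);

	/* True iff the bus was installed and is now quiesced. */
	return old_state == hv_pcibus_installed;
}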

static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
	struct irq_data *irq_data;
	struct msi_desc *entry;
	int ret = 0;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		return 0;

	msi_lock_descs(&pdev->dev);
	msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
		irq_data = irq_get_irq_data(entry->irq);
		if (WARN_ON_ONCE(!irq_data)) {
			ret = -EINVAL;
			break;
		}

		hv_compose_msi_msg(irq_data, &entry->msg);
	}
	msi_unlock_descs(&pdev->dev);

	return ret;
}

/*
 * Upon resume, pci_restore_msi_state() -> ... -> __pci_write_msi_msg()
 * directly writes the MSI/MSI-X registers via MMIO, but since Hyper-V
 * doesn't trap and emulate the MMIO accesses, here hv_compose_msi_msg()
 * must be used to ask Hyper-V to re-create the IOMMU Interrupt Remapping
 * Table entries.
 */
static void hv_pci_restore_msi_state(struct hv_pcibus_device *hbus)
{
	pci_walk_bus(hbus->bridge->bus, hv_pci_restore_msi_msg, NULL);
}

static int hv_pci_resume(struct hv_device *hdev)
{
	struct hv_pcibus_device *hbus = hv_get_drvdata(hdev);
	enum pci_protocol_version_t version[1];
	int ret;

	hbus->state = hv_pcibus_init;

	hdev->channel->next_request_id_callback = vmbus_next_request_id;
	hdev->channel->request_addr_callback = vmbus_request_addr;
	hdev->channel->rqstor_size = HV_PCI_RQSTOR_SIZE;

	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
			 hv_pci_onchannelcallback, hbus);
	if (ret)
		return ret;

	/* Only use the version that was in use before hibernation. */
	version[0] = hbus->protocol_version;
	ret = hv_pci_protocol_negotiation(hdev, version, 1);
	if (ret)
		goto out;

	ret = hv_pci_query_relations(hdev);
	if (ret)
		goto out;

	mutex_lock(&hbus->state_lock);

	ret = hv_pci_enter_d0(hdev);
	if (ret)
		goto release_state_lock;

	ret = hv_send_resources_allocated(hdev);
	if (ret)
		goto release_state_lock;

	prepopulate_bars(hbus);

	hv_pci_restore_msi_state(hbus);

	hbus->state = hv_pcibus_installed;
	mutex_unlock(&hbus->state_lock);
	return 0;

release_state_lock:
	mutex_unlock(&hbus->state_lock);
out:
	vmbus_close(hdev->channel);
	return ret;
}

static const struct hv_vmbus_device_id hv_pci_id_table[] = {
	/* PCI Pass-through Class ID */
	/* 44C4F61D-4444-4400-9D52-802E27EDE19F */
	{ HV_PCIE_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table);

static struct hv_driver hv_pci_drv = {
	.name		= "hv_pci",
	.id_table	= hv_pci_id_table,
	.probe		= hv_pci_probe,
	.remove		= hv_pci_remove,
	.suspend	= hv_pci_suspend,
	.resume		= hv_pci_resume,
};

static void __exit exit_hv_pci_drv(void)
{
	vmbus_driver_unregister(&hv_pci_drv);

	hvpci_block_ops.read_block = NULL;
	hvpci_block_ops.write_block = NULL;
	hvpci_block_ops.reg_blk_invalidate = NULL;
}

static int __init init_hv_pci_drv(void)
{
	int ret;

	if (!hv_is_hyperv_initialized())
		return -ENODEV;

	ret = hv_pci_irqchip_init();
	if (ret)
		return ret;

	/* Set the invalid domain number's bit, so it will not be used */
	set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);

	/* Initialize PCI block r/w interface */
	hvpci_block_ops.read_block = hv_read_config_block;
	hvpci_block_ops.write_block = hv_write_config_block;
	hvpci_block_ops.reg_blk_invalidate = hv_register_block_invalidate;

	return vmbus_driver_register(&hv_pci_drv);
}

module_init(init_hv_pci_drv);
module_exit(exit_hv_pci_drv);

MODULE_DESCRIPTION("Hyper-V PCI");
MODULE_LICENSE("GPL v2");