// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Intel Corporation
 * Author: Johannes Berg <johannes@sipsolutions.net>
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/logic_iomem.h>
#include <linux/irqdomain.h>
#include <linux/virtio_pcidev.h>
#include <linux/virtio-uml.h>
#include <linux/delay.h>
#include <linux/msi.h>
#include <asm/unaligned.h>
#include <irq_kern.h>

#define MAX_DEVICES 8
#define MAX_MSI_VECTORS 32
#define CFG_SPACE_SIZE 4096

/* for MSI-X we have a 32-bit payload */
#define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32))
#define NUM_IRQ_MSGS	10

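/*
 * Command buffers placed on the virtqueue are tagged in the (always
 * zero) low pointer bit: a tagged token marks a caller-owned buffer
 * that must only be matched on completion, never kfree()d.
 */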
#define HANDLE_NO_FREE(ptr) ((void *)((unsigned long)(ptr) | 1))
#define HANDLE_IS_NO_FREE(ptr) ((unsigned long)(ptr) & 1)

struct um_pci_device {
	struct virtio_device *vdev;

	/* for now just standard BARs */
	u8 resptr[PCI_STD_NUM_BARS];

	struct virtqueue *cmd_vq, *irq_vq;

#define UM_PCI_STAT_WAITING	0
	unsigned long status;

	int irq;
};

struct um_pci_device_reg {
	struct um_pci_device *dev;
	void __iomem *iomem;
};

static struct pci_host_bridge *bridge;
static DEFINE_MUTEX(um_pci_mtx);
static struct um_pci_device_reg um_pci_devices[MAX_DEVICES];
static struct fwnode_handle *um_pci_fwnode;
static struct irq_domain *um_pci_inner_domain;
static struct irq_domain *um_pci_msi_domain;
static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)];

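/* response poll limit: 1 us per iteration, i.e. roughly 40 ms */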
#define UM_VIRT_PCI_MAXDELAY 40000

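/*
 * Send a command to the device and optionally wait for a reply.
 *
 * Posted writes (config/MMIO write, memset) are copied into a single
 * freshly allocated buffer so this can return immediately; that
 * buffer is freed later from the completion path.  All other commands
 * are queued with the no-free tag and polled for completion right
 * here, with the device's reply landing in @out.
 *
 * On the wire, a 4-byte MMIO write to BAR 0 would for example be
 * sent as
 *
 *	struct virtio_pcidev_msg hdr = {
 *		.op	= VIRTIO_PCIDEV_OP_MMIO_WRITE,
 *		.bar	= 0,
 *		.size	= 4,
 *		.addr	= offset,
 *	};
 *
 * followed by the four payload bytes in little-endian order.
 */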
static int um_pci_send_cmd(struct um_pci_device *dev,
			   struct virtio_pcidev_msg *cmd,
			   unsigned int cmd_size,
			   const void *extra, unsigned int extra_size,
			   void *out, unsigned int out_size)
{
	struct scatterlist out_sg, extra_sg, in_sg;
	struct scatterlist *sgs_list[] = {
		[0] = &out_sg,
		[1] = extra ? &extra_sg : &in_sg,
		[2] = extra ? &in_sg : NULL,
	};
	int delay_count = 0;
	int ret, len;
	bool posted;

	if (WARN_ON(cmd_size < sizeof(*cmd)))
		return -EINVAL;

	switch (cmd->op) {
	case VIRTIO_PCIDEV_OP_CFG_WRITE:
	case VIRTIO_PCIDEV_OP_MMIO_WRITE:
	case VIRTIO_PCIDEV_OP_MMIO_MEMSET:
		/* in PCI, writes are posted, so don't wait */
		posted = !out;
		WARN_ON(!posted);
		break;
	default:
		posted = false;
		break;
	}

	if (posted) {
		u8 *ncmd = kmalloc(cmd_size + extra_size, GFP_ATOMIC);

		if (ncmd) {
			memcpy(ncmd, cmd, cmd_size);
			if (extra)
				memcpy(ncmd + cmd_size, extra, extra_size);
			cmd = (void *)ncmd;
			cmd_size += extra_size;
			extra = NULL;
			extra_size = 0;
		} else {
			/* try without allocating memory */
			posted = false;
		}
	}

	sg_init_one(&out_sg, cmd, cmd_size);
	if (extra)
		sg_init_one(&extra_sg, extra, extra_size);
	if (out)
		sg_init_one(&in_sg, out, out_size);

	/* add to internal virtio queue */
	ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list,
				extra ? 2 : 1,
				out ? 1 : 0,
				posted ? cmd : HANDLE_NO_FREE(cmd),
				GFP_ATOMIC);
	if (ret)
		return ret;

	if (posted) {
		virtqueue_kick(dev->cmd_vq);
		return 0;
	}

	/* kick and poll for getting a response on the queue */
	set_bit(UM_PCI_STAT_WAITING, &dev->status);
	virtqueue_kick(dev->cmd_vq);

	while (1) {
		void *completed = virtqueue_get_buf(dev->cmd_vq, &len);

		if (completed == HANDLE_NO_FREE(cmd))
			break;

		if (completed && !HANDLE_IS_NO_FREE(completed))
			kfree(completed);

		if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) ||
			      ++delay_count > UM_VIRT_PCI_MAXDELAY,
			      "um virt-pci delay: %d", delay_count)) {
			ret = -EIO;
			break;
		}
		udelay(1);
	}
	clear_bit(UM_PCI_STAT_WAITING, &dev->status);

	return ret;
}

static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset,
					  int size)
{
	struct um_pci_device_reg *reg = priv;
	struct um_pci_device *dev = reg->dev;
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_CFG_READ,
		.size = size,
		.addr = offset,
	};
	/* maximum size - we may only use parts of it */
	u8 data[8];

	if (!dev)
		return ~0ULL;

	memset(data, 0xff, sizeof(data));

	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef CONFIG_64BIT
	case 8:
#endif
		break;
	default:
		WARN(1, "invalid config space read size %d\n", size);
		return ~0ULL;
	}

	if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0,
			    data, sizeof(data)))
		return ~0ULL;

	switch (size) {
	case 1:
		return data[0];
	case 2:
		return le16_to_cpup((void *)data);
	case 4:
		return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
	case 8:
		return le64_to_cpup((void *)data);
#endif
	default:
		return ~0ULL;
	}
}

static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size,
				  unsigned long val)
{
	struct um_pci_device_reg *reg = priv;
	struct um_pci_device *dev = reg->dev;
	struct {
		struct virtio_pcidev_msg hdr;
		/* maximum size - we may only use parts of it */
		u8 data[8];
	} msg = {
		.hdr = {
			.op = VIRTIO_PCIDEV_OP_CFG_WRITE,
			.size = size,
			.addr = offset,
		},
	};

	if (!dev)
		return;

	switch (size) {
	case 1:
		msg.data[0] = (u8)val;
		break;
	case 2:
		put_unaligned_le16(val, (void *)msg.data);
		break;
	case 4:
		put_unaligned_le32(val, (void *)msg.data);
		break;
#ifdef CONFIG_64BIT
	case 8:
		put_unaligned_le64(val, (void *)msg.data);
		break;
#endif
	default:
		WARN(1, "invalid config space write size %d\n", size);
		return;
	}

	WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0));
}

static const struct logic_iomem_ops um_pci_device_cfgspace_ops = {
	.read = um_pci_cfgspace_read,
	.write = um_pci_cfgspace_write,
};

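/*
 * The BAR accessors receive a pointer to one byte of the device's
 * resptr[] array as their private data.  Each byte stores its own
 * index, so *resptr yields the BAR number and container_of() on
 * (resptr - *resptr) recovers the owning um_pci_device.
 */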
static void um_pci_bar_copy_from(void *priv, void *buffer,
				 unsigned int offset, int size)
{
	u8 *resptr = priv;
	struct um_pci_device *dev = container_of(resptr - *resptr,
						 struct um_pci_device,
						 resptr[0]);
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_MMIO_READ,
		.bar = *resptr,
		.size = size,
		.addr = offset,
	};

	memset(buffer, 0xff, size);

	um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size);
}

static unsigned long um_pci_bar_read(void *priv, unsigned int offset,
				     int size)
{
	/* maximum size - we may only use parts of it */
	u8 data[8];

	switch (size) {
	case 1:
	case 2:
	case 4:
#ifdef CONFIG_64BIT
	case 8:
#endif
		break;
	default:
		WARN(1, "invalid BAR read size %d\n", size);
		return ~0ULL;
	}

	um_pci_bar_copy_from(priv, data, offset, size);

	switch (size) {
	case 1:
		return data[0];
	case 2:
		return le16_to_cpup((void *)data);
	case 4:
		return le32_to_cpup((void *)data);
#ifdef CONFIG_64BIT
	case 8:
		return le64_to_cpup((void *)data);
#endif
	default:
		return ~0ULL;
	}
}

static void um_pci_bar_copy_to(void *priv, unsigned int offset,
			       const void *buffer, int size)
{
	u8 *resptr = priv;
	struct um_pci_device *dev = container_of(resptr - *resptr,
						 struct um_pci_device,
						 resptr[0]);
	struct virtio_pcidev_msg hdr = {
		.op = VIRTIO_PCIDEV_OP_MMIO_WRITE,
		.bar = *resptr,
		.size = size,
		.addr = offset,
	};

	um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0);
}

static void um_pci_bar_write(void *priv, unsigned int offset, int size,
			     unsigned long val)
{
	/* maximum size - we may only use parts of it */
	u8 data[8];

	switch (size) {
	case 1:
		data[0] = (u8)val;
		break;
	case 2:
		put_unaligned_le16(val, (void *)data);
		break;
	case 4:
		put_unaligned_le32(val, (void *)data);
		break;
#ifdef CONFIG_64BIT
	case 8:
		put_unaligned_le64(val, (void *)data);
		break;
#endif
	default:
		WARN(1, "invalid BAR write size %d\n", size);
		return;
	}

	um_pci_bar_copy_to(priv, offset, data, size);
}

static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size)
{
	u8 *resptr = priv;
	struct um_pci_device *dev = container_of(resptr - *resptr,
						 struct um_pci_device,
						 resptr[0]);
	struct {
		struct virtio_pcidev_msg hdr;
		u8 data;
	} msg = {
		.hdr = {
			.op = VIRTIO_PCIDEV_OP_CFG_WRITE,
			.bar = *resptr,
			.size = size,
			.addr = offset,
		},
		.data = value,
	};

	um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0);
}

static const struct logic_iomem_ops um_pci_device_bar_ops = {
	.read = um_pci_bar_read,
	.write = um_pci_bar_write,
	.set = um_pci_bar_set,
	.copy_from = um_pci_bar_copy_from,
	.copy_to = um_pci_bar_copy_to,
};

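/*
 * Config space accesses are backed by the logic_iomem mapping set up
 * in um_pci_init(): one CFG_SPACE_SIZE slot per device, indexed by
 * slot number (devfn / 8).  Functions other than 0 are not supported
 * and are rejected here.
 */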
static void __iomem *um_pci_map_bus(struct pci_bus *bus, unsigned int devfn,
				    int where)
{
	struct um_pci_device_reg *dev;
	unsigned int busn = bus->number;

	if (busn > 0)
		return NULL;

	/* not allowing functions for now ... */
	if (devfn % 8)
		return NULL;

	if (devfn / 8 >= ARRAY_SIZE(um_pci_devices))
		return NULL;

	dev = &um_pci_devices[devfn / 8];
	if (!dev)
		return NULL;

	return (void __iomem *)((unsigned long)dev->iomem + where);
}

static struct pci_ops um_pci_ops = {
	.map_bus = um_pci_map_bus,
	.read = pci_generic_config_read,
	.write = pci_generic_config_write,
};

static void um_pci_rescan(void)
{
	pci_lock_rescan_remove();
	pci_rescan_bus(bridge->bus);
	pci_unlock_rescan_remove();
}

static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick)
{
	struct scatterlist sg[1];

	sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE);
	if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC))
		kfree(buf);
	else if (kick)
		virtqueue_kick(vq);
}

static void um_pci_handle_irq_message(struct virtqueue *vq,
				      struct virtio_pcidev_msg *msg)
{
	struct virtio_device *vdev = vq->vdev;
	struct um_pci_device *dev = vdev->priv;

	/* we should properly chain interrupts, but on ARCH=um we don't care */

	switch (msg->op) {
	case VIRTIO_PCIDEV_OP_INT:
		generic_handle_irq(dev->irq);
		break;
	case VIRTIO_PCIDEV_OP_MSI:
		/* our MSI message is just the interrupt number */
		if (msg->size == sizeof(u32))
			generic_handle_irq(le32_to_cpup((void *)msg->data));
		else
			generic_handle_irq(le16_to_cpup((void *)msg->data));
		break;
	case VIRTIO_PCIDEV_OP_PME:
		/* nothing to do - we already woke up due to the message */
		break;
	default:
		dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op);
		break;
	}
}

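/*
 * um_pci_send_cmd() reaps used buffers itself while it is polling
 * for a response, so bail out if somebody is currently waiting;
 * otherwise just free the buffers of completed posted writes.
 */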
static void um_pci_cmd_vq_cb(struct virtqueue *vq)
{
	struct virtio_device *vdev = vq->vdev;
	struct um_pci_device *dev = vdev->priv;
	void *cmd;
	int len;

	if (test_bit(UM_PCI_STAT_WAITING, &dev->status))
		return;

	while ((cmd = virtqueue_get_buf(vq, &len))) {
		if (WARN_ON(HANDLE_IS_NO_FREE(cmd)))
			continue;
		kfree(cmd);
	}
}

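/*
 * IRQ messages are only ever sent by the device, so each buffer the
 * device returns is handled and then immediately re-queued as a new
 * receive buffer.
 */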
static void um_pci_irq_vq_cb(struct virtqueue *vq)
{
	struct virtio_pcidev_msg *msg;
	int len;

	while ((msg = virtqueue_get_buf(vq, &len))) {
		if (len >= sizeof(*msg))
			um_pci_handle_irq_message(vq, msg);

		/* recycle the message buffer */
		um_pci_irq_vq_addbuf(vq, msg, true);
	}
}

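/*
 * Each device has two virtqueues: "cmd" carries driver-initiated
 * requests, "irq" is kept stocked with NUM_IRQ_MSGS receive buffers
 * so the device can deliver interrupt, MSI and PME messages at any
 * time.
 */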
static int um_pci_init_vqs(struct um_pci_device *dev)
{
	struct virtqueue *vqs[2];
	static const char *const names[2] = { "cmd", "irq" };
	vq_callback_t *cbs[2] = { um_pci_cmd_vq_cb, um_pci_irq_vq_cb };
	int err, i;

	err = virtio_find_vqs(dev->vdev, 2, vqs, cbs, names, NULL);
	if (err)
		return err;

	dev->cmd_vq = vqs[0];
	dev->irq_vq = vqs[1];

	for (i = 0; i < NUM_IRQ_MSGS; i++) {
		void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL);

		if (msg)
			um_pci_irq_vq_addbuf(dev->irq_vq, msg, false);
	}

	virtqueue_kick(dev->irq_vq);

	return 0;
}

static int um_pci_virtio_probe(struct virtio_device *vdev)
{
	struct um_pci_device *dev;
	int i, free = -1;
	int err = -ENOSPC;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->vdev = vdev;
	vdev->priv = dev;

	mutex_lock(&um_pci_mtx);
	for (i = 0; i < MAX_DEVICES; i++) {
		if (um_pci_devices[i].dev)
			continue;
		free = i;
		break;
	}

	if (free < 0)
		goto error;

	err = um_pci_init_vqs(dev);
	if (err)
		goto error;

	dev->irq = irq_alloc_desc(numa_node_id());
	if (dev->irq < 0) {
		err = dev->irq;
		goto error;
	}
	um_pci_devices[free].dev = dev;
	vdev->priv = dev;

	mutex_unlock(&um_pci_mtx);

	device_set_wakeup_enable(&vdev->dev, true);

	/*
	 * In order to do suspend-resume properly, don't allow VQs
	 * to be suspended.
	 */
	virtio_uml_set_no_vq_suspend(vdev, true);

	um_pci_rescan();
	return 0;
error:
	mutex_unlock(&um_pci_mtx);
	kfree(dev);
	return err;
}

static void um_pci_virtio_remove(struct virtio_device *vdev)
{
	struct um_pci_device *dev = vdev->priv;
	int i;

	/* Stop all virtqueues */
	vdev->config->reset(vdev);
	vdev->config->del_vqs(vdev);

	device_set_wakeup_enable(&vdev->dev, false);

	mutex_lock(&um_pci_mtx);
	for (i = 0; i < MAX_DEVICES; i++) {
		if (um_pci_devices[i].dev != dev)
			continue;
		um_pci_devices[i].dev = NULL;
		irq_free_desc(dev->irq);
	}
	mutex_unlock(&um_pci_mtx);

	um_pci_rescan();

	kfree(dev);
}

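/*
 * There is no (official) virtio device ID for this yet, so the ID we
 * bind to comes from Kconfig and must match whatever the host-side
 * implementation registered.
 */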
static struct virtio_device_id id_table[] = {
	{ CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver um_pci_virtio_driver = {
	.driver.name = "virtio-pci",
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = um_pci_virtio_probe,
	.remove = um_pci_virtio_remove,
};

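/*
 * Fixed virtual MMIO layout: the per-device config spaces occupy the
 * MAX_DEVICES * CFG_SPACE_SIZE bytes just below 0xf0000000, and the
 * region from 0xf0000000 to 0xffffffff serves as iomem for the
 * devices' BARs.
 */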
static struct resource virt_cfgspace_resource = {
	.name = "PCI config space",
	.start = 0xf0000000 - MAX_DEVICES * CFG_SPACE_SIZE,
	.end = 0xf0000000 - 1,
	.flags = IORESOURCE_MEM,
};

static long um_pci_map_cfgspace(unsigned long offset, size_t size,
				const struct logic_iomem_ops **ops,
				void **priv)
{
	if (WARN_ON(size > CFG_SPACE_SIZE || offset % CFG_SPACE_SIZE))
		return -EINVAL;

	if (offset / CFG_SPACE_SIZE < MAX_DEVICES) {
		*ops = &um_pci_device_cfgspace_ops;
		*priv = &um_pci_devices[offset / CFG_SPACE_SIZE];
		return 0;
	}

	WARN(1, "cannot map offset 0x%lx/0x%zx\n", offset, size);
	return -ENOENT;
}

static const struct logic_iomem_region_ops um_pci_cfgspace_ops = {
	.map = um_pci_map_cfgspace,
};

static struct resource virt_iomem_resource = {
	.name = "PCI iomem",
	.start = 0xf0000000,
	.end = 0xffffffff,
	.flags = IORESOURCE_MEM,
};

struct um_pci_map_iomem_data {
	unsigned long offset;
	size_t size;
	const struct logic_iomem_ops **ops;
	void **priv;
	long ret;
};

static int um_pci_map_iomem_walk(struct pci_dev *pdev, void *_data)
{
	struct um_pci_map_iomem_data *data = _data;
	struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];
	struct um_pci_device *dev;
	int i;

	if (!reg->dev)
		return 0;

	for (i = 0; i < ARRAY_SIZE(dev->resptr); i++) {
		struct resource *r = &pdev->resource[i];

		if ((r->flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM)
			continue;

		/*
		 * must be the whole or part of the resource,
		 * not allowed to only overlap
		 */
		if (data->offset < r->start || data->offset > r->end)
			continue;
		if (data->offset + data->size - 1 > r->end)
			continue;

		dev = reg->dev;
		*data->ops = &um_pci_device_bar_ops;
		dev->resptr[i] = i;
		*data->priv = &dev->resptr[i];
		data->ret = data->offset - r->start;

		/* no need to continue */
		return 1;
	}

	return 0;
}

static long um_pci_map_iomem(unsigned long offset, size_t size,
			     const struct logic_iomem_ops **ops,
			     void **priv)
{
	struct um_pci_map_iomem_data data = {
		/* we want the full address here */
		.offset = offset + virt_iomem_resource.start,
		.size = size,
		.ops = ops,
		.priv = priv,
		.ret = -ENOENT,
	};

	pci_walk_bus(bridge->bus, um_pci_map_iomem_walk, &data);
	return data.ret;
}

static const struct logic_iomem_region_ops um_pci_iomem_ops = {
	.map = um_pci_map_iomem,
};

static void um_pci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	/*
	 * This is a very low address and not actually valid 'physical' memory
	 * in UML, so we can simply map MSI(-X) vectors to there, it cannot be
	 * legitimately written to by the device in any other way.
	 * We use the (virtual) IRQ number here as the message to simplify the
	 * code that receives the message, where for now we simply trust the
	 * device to send the correct message.
	 */
	msg->address_hi = 0;
	msg->address_lo = 0xa0000;
	msg->data = data->irq;
}

static struct irq_chip um_pci_msi_bottom_irq_chip = {
	.name = "UM virtio MSI",
	.irq_compose_msi_msg = um_pci_compose_msi_msg,
};

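/*
 * MSI vectors are handed out one at a time from a simple bitmap.
 * The MSI message itself carries the Linux virq (see
 * um_pci_compose_msi_msg()), which the irq virtqueue handler feeds
 * straight back into generic_handle_irq().
 */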
static int um_pci_inner_domain_alloc(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs,
				     void *args)
{
	unsigned long bit;

	WARN_ON(nr_irqs != 1);

	mutex_lock(&um_pci_mtx);
	bit = find_first_zero_bit(um_pci_msi_used, MAX_MSI_VECTORS);
	if (bit >= MAX_MSI_VECTORS) {
		mutex_unlock(&um_pci_mtx);
		return -ENOSPC;
	}

	set_bit(bit, um_pci_msi_used);
	mutex_unlock(&um_pci_mtx);

	irq_domain_set_info(domain, virq, bit, &um_pci_msi_bottom_irq_chip,
			    domain->host_data, handle_simple_irq,
			    NULL, NULL);

	return 0;
}

static void um_pci_inner_domain_free(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	mutex_lock(&um_pci_mtx);

	if (!test_bit(d->hwirq, um_pci_msi_used))
		pr_err("trying to free unused MSI#%lu\n", d->hwirq);
	else
		__clear_bit(d->hwirq, um_pci_msi_used);

	mutex_unlock(&um_pci_mtx);
}

static const struct irq_domain_ops um_pci_inner_domain_ops = {
	.alloc = um_pci_inner_domain_alloc,
	.free = um_pci_inner_domain_free,
};

static struct irq_chip um_pci_msi_irq_chip = {
	.name = "UM virtio PCIe MSI",
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static struct msi_domain_info um_pci_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS |
		  MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX,
	.chip	= &um_pci_msi_irq_chip,
};

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUS,
};

static int um_pci_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct um_pci_device_reg *reg = &um_pci_devices[pdev->devfn / 8];

	if (WARN_ON(!reg->dev))
		return -EINVAL;

	/* Yes, we map all pins to the same IRQ ... doesn't matter for now. */
	return reg->dev->irq;
}

void *pci_root_bus_fwnode(struct pci_bus *bus)
{
	return um_pci_fwnode;
}

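/*
 * Register the logic_iomem regions, the irq domains and the host
 * bridge, then the virtio driver whose probe populates the bus.  If
 * no virtio device ID is configured, PCI support is disabled (with a
 * warning) rather than failing the boot.
 */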
int um_pci_init(void)
{
	int err, i;

	WARN_ON(logic_iomem_add_region(&virt_cfgspace_resource,
				       &um_pci_cfgspace_ops));
	WARN_ON(logic_iomem_add_region(&virt_iomem_resource,
				       &um_pci_iomem_ops));

	if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0,
		 "No virtio device ID configured for PCI - no PCI support\n"))
		return 0;

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	um_pci_fwnode = irq_domain_alloc_named_fwnode("um-pci");
	if (!um_pci_fwnode) {
		err = -ENOMEM;
		goto free;
	}

	um_pci_inner_domain = __irq_domain_add(um_pci_fwnode, MAX_MSI_VECTORS,
					       MAX_MSI_VECTORS, 0,
					       &um_pci_inner_domain_ops, NULL);
	if (!um_pci_inner_domain) {
		err = -ENOMEM;
		goto free;
	}

	um_pci_msi_domain = pci_msi_create_irq_domain(um_pci_fwnode,
						      &um_pci_msi_domain_info,
						      um_pci_inner_domain);
	if (!um_pci_msi_domain) {
		err = -ENOMEM;
		goto free;
	}

	pci_add_resource(&bridge->windows, &virt_iomem_resource);
	pci_add_resource(&bridge->windows, &busn_resource);
	bridge->ops = &um_pci_ops;
	bridge->map_irq = um_pci_map_irq;

	for (i = 0; i < MAX_DEVICES; i++) {
		resource_size_t start;

		start = virt_cfgspace_resource.start + i * CFG_SPACE_SIZE;
		um_pci_devices[i].iomem = ioremap(start, CFG_SPACE_SIZE);
		if (WARN(!um_pci_devices[i].iomem, "failed to map %d\n", i)) {
			err = -ENOMEM;
			goto free;
		}
	}

	err = pci_host_probe(bridge);
	if (err)
		goto free;

	err = register_virtio_driver(&um_pci_virtio_driver);
	if (err)
		goto free;
	return 0;
free:
	if (um_pci_inner_domain)
		irq_domain_remove(um_pci_inner_domain);
	if (um_pci_fwnode)
		irq_domain_free_fwnode(um_pci_fwnode);
	pci_free_resource_list(&bridge->windows);
	pci_free_host_bridge(bridge);
	return err;
}
module_init(um_pci_init);

void um_pci_exit(void)
{
	unregister_virtio_driver(&um_pci_virtio_driver);
	irq_domain_remove(um_pci_msi_domain);
	irq_domain_remove(um_pci_inner_domain);
	pci_free_resource_list(&bridge->windows);
	pci_free_host_bridge(bridge);
}
module_exit(um_pci_exit);