// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - modern (virtio 1.0) device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include <linux/delay.h>
#define VIRTIO_PCI_NO_LEGACY
#include "virtio_pci_common.h"

/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the “natural” access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
static inline u8 vp_ioread8(u8 __iomem *addr)
{
	return ioread8(addr);
}
static inline u16 vp_ioread16(__le16 __iomem *addr)
{
	return ioread16(addr);
}

static inline u32 vp_ioread32(__le32 __iomem *addr)
{
	return ioread32(addr);
}

static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}

static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
	iowrite16(value, addr);
}

static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
	iowrite32(value, addr);
}

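/*
 * The spec only guarantees accesses up to 32 bits wide, so 64-bit fields
 * such as the queue addresses are written as two 32-bit halves, low
 * word first.
 */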
static void vp_iowrite64_twopart(u64 val,
				 __le32 __iomem *lo, __le32 __iomem *hi)
{
	vp_iowrite32((u32)val, lo);
	vp_iowrite32(val >> 32, hi);
}

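/*
 * Read the virtio_pci_cap at config-space offset @off and map the window
 * it describes: [start, start + size) within the capability, clamped to
 * the capability's own length and validated against the BAR.  @minlen is
 * the smallest usable mapping, @align the required offset alignment.
 * Returns the mapping (with its length in *@len when @len is non-NULL),
 * or NULL on error.
 */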
static void __iomem *map_capability(struct pci_dev *dev, int off,
				    size_t minlen,
				    u32 align,
				    u32 start, u32 size,
				    size_t *len)
{
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
						 bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u "
			"out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	return p;
}

/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features;

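	/*
	 * The 64 feature bits are exposed through a 32-bit window:
	 * write the bank index (0 or 1) to device_feature_select,
	 * then read that bank from device_feature.
	 */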
	vp_iowrite32(0, &vp_dev->common->device_feature_select);
	features = vp_ioread32(&vp_dev->common->device_feature);
	vp_iowrite32(1, &vp_dev->common->device_feature_select);
	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);

	return features;
}

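/*
 * Accept transport (PCI-level) features that virtio_ring does not know
 * about; currently just VIRTIO_F_SR_IOV, and only when the device really
 * exposes an SR-IOV extended capability.
 */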
static void vp_transport_features(struct virtio_device *vdev, u64 features)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features = vdev->features;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_pci a chance to accept features. */
	vp_transport_features(vdev, features);

	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses modern interface "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

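	/* Write the accepted features back, one 32-bit bank at a time. */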
	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);

	return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		b = ioread8(vp_dev->device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(vp_dev->device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		break;
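	/*
	 * 64-bit fields are read as two 32-bit accesses, low word first;
	 * callers can use the config generation counter to detect a read
	 * torn across the two halves.
	 */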
	case 8:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, vp_dev->device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), vp_dev->device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

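/*
 * config->generation() implementation: the device changes this value
 * whenever the config space is modified, so callers can detect torn
 * multi-field reads by sampling it before and after.
 */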
static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return vp_ioread8(&vp_dev->common->config_generation);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	return vp_ioread8(&vp_dev->common->device_status);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_iowrite8(status, &vp_dev->common->device_status);
}

static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	vp_iowrite8(0, &vp_dev->common->device_status);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_ioread8(&vp_dev->common->device_status))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &vp_dev->common->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&vp_dev->common->msix_config);
}

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
	struct virtqueue *vq;
	u16 num, off;
	int err;

	if (index >= vp_ioread16(&cfg->num_queues))
		return ERR_PTR(-ENOENT);

	/* Select the queue we're interested in */
	vp_iowrite16(index, &cfg->queue_select);

	/* Check if queue is either not available or already active. */
	num = vp_ioread16(&cfg->queue_size);
	if (!num || vp_ioread16(&cfg->queue_enable))
		return ERR_PTR(-ENOENT);

	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	/* get offset of notification word for this vq */
	off = vp_ioread16(&cfg->queue_notify_off);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue */
	vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
	vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
	vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
	vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
			     &cfg->queue_used_lo, &cfg->queue_used_hi);

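	/*
	 * The notification address for this vq is
	 * notify_base + queue_notify_off * notify_off_multiplier.
	 * If the whole notify window was mapped at probe time, just point
	 * into it (after a bounds check); otherwise map the 2-byte notify
	 * register for this queue individually.
	 */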
	if (vp_dev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * vp_dev->notify_offset_multiplier + 2
		    > vp_dev->notify_len) {
			dev_warn(&vp_dev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, vp_dev->notify_offset_multiplier,
				 index, vp_dev->notify_len);
			err = -EINVAL;
			goto err_map_notify;
		}
		vq->priv = (void __force *)vp_dev->notify_base +
			off * vp_dev->notify_offset_multiplier;
	} else {
		vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
					  vp_dev->notify_map_cap, 2, 2,
					  off * vp_dev->notify_offset_multiplier, 2,
					  NULL);
	}

	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}

static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char * const names[], const bool *ctx,
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list) {
		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
		vp_iowrite16(1, &vp_dev->common->queue_enable);
	}

	return 0;
}

static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	vp_iowrite16(vq->index, &vp_dev->common->queue_select);

	if (vp_dev->msix_enabled) {
		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
			     &vp_dev->common->queue_msix_vector);
		/* Flush the write out to device */
		vp_ioread16(&vp_dev->common->queue_msix_vector);
	}

	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}

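/*
 * Devices without a device-specific config area get the "nodev" ops,
 * with ->get()/->set() left NULL; everything else is shared.
 */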
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};

static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs, updated with the BAR backing the capability.
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}

/* This is part of the ABI.  Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/* the PCI probing function */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		vp_dev->vdev.id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
	}
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &vp_dev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

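	/* Prefer 64-bit DMA; fall back to 32-bit, and warn (but keep
	 * going) if neither mask can be set.
	 */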
	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);

	err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	vp_dev->common = map_capability(pci_dev, common,
					sizeof(struct virtio_pci_common_cfg), 4,
					0, sizeof(struct virtio_pci_common_cfg),
					NULL);
	if (!vp_dev->common)
		goto err_map_common;
	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
				     0, 1,
				     NULL);
	if (!vp_dev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &vp_dev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know ahead of time how many VQs we'll map.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
						     0, notify_length,
						     &vp_dev->notify_len);
		if (!vp_dev->notify_base)
			goto err_map_notify;
	} else {
		vp_dev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		vp_dev->device = map_capability(pci_dev, device, 0, 4,
						0, PAGE_SIZE,
						&vp_dev->device_len);
		if (!vp_dev->device)
			goto err_map_device;

		vp_dev->vdev.config = &virtio_pci_config_ops;
	} else {
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
	}

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_map_device:
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, vp_dev->isr);
err_map_isr:
	pci_iounmap(pci_dev, vp_dev->common);
err_map_common:
	return err;
}

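/* Tear down in reverse order of virtio_pci_modern_probe(). */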
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if (vp_dev->device)
		pci_iounmap(pci_dev, vp_dev->device);
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
	pci_iounmap(pci_dev, vp_dev->isr);
	pci_iounmap(pci_dev, vp_dev->common);
	pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
}