Lines Matching +full:cfg +full:- +full:space

1 // SPDX-License-Identifier: GPL-2.0-or-later
9 * vp_modern_map_capability - map a part of virtio pci capability
10 * @mdev: the modern virtio-pci device
26 struct pci_dev *dev = mdev->pci_dev; in vp_modern_map_capability()
40 if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) { in vp_modern_map_capability()
41 dev_err(&dev->dev, in vp_modern_map_capability()
47 dev_err(&dev->dev, in vp_modern_map_capability()
53 if (length - start < minlen) { in vp_modern_map_capability()
54 dev_err(&dev->dev, in vp_modern_map_capability()
60 length -= start; in vp_modern_map_capability()
63 dev_err(&dev->dev, in vp_modern_map_capability()
64 "virtio_pci: map wrap-around %u+%u\n", in vp_modern_map_capability()
71 if (offset & (align - 1)) { in vp_modern_map_capability()
72 dev_err(&dev->dev, in vp_modern_map_capability()
86 dev_err(&dev->dev, in vp_modern_map_capability()
96 dev_err(&dev->dev, in vp_modern_map_capability()
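
The fragments above come from vp_modern_map_capability(): it reads the capability's BAR, offset and length from PCI config space, validates them, then maps the window. A condensed sketch of that flow (assumed simplification, not the exact kernel code; it drops the start/size handling and error messages shown above):

	/* Condensed sketch: read bar/offset/length of a virtio PCI capability
	 * from config space, sanity-check them, then map with pci_iomap_range().
	 */
	static void __iomem *map_capability_sketch(struct pci_dev *dev, int off,
						   size_t minlen, u32 align, u32 *len)
	{
		u8 bar;
		u32 offset, length;

		pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, bar), &bar);
		pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), &offset);
		pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), &length);

		if (bar >= PCI_STD_NUM_BARS || length < minlen || (offset & (align - 1)))
			return NULL;		/* bad capability layout */

		if (len)
			*len = length;
		return pci_iomap_range(dev, bar, offset, length);	/* map the BAR window */
	}
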
106 * virtio_pci_find_capability - walk capabilities to find device info.
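
virtio_pci_find_capability() walks the PCI capability list looking for a vendor-specific capability of a given cfg_type and remembers which BAR it points at. A sketch of that walk, assuming the standard pci_find_capability()/pci_find_next_capability() helpers and the struct virtio_pci_cap layout from the virtio spec:

	/* Sketch (assumed, not verbatim): return the config-space offset of the
	 * vendor capability whose cfg_type matches, recording its BAR in *bars.
	 */
	static int find_capability_sketch(struct pci_dev *dev, u8 cfg_type, u32 *bars)
	{
		int pos;

		for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
		     pos > 0;
		     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
			u8 type, bar;

			pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, cfg_type), &type);
			pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, bar), &bar);

			if (type == cfg_type && bar < PCI_STD_NUM_BARS) {
				*bars |= (1 << bar);	/* caller requests this BAR later */
				return pos;
			}
		}
		return 0;	/* capability not found */
	}
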
148 /* Note: disk space was harmed in compilation of this function. */ in check_offsets()
211 * @mdev: the modern virtio-pci device
217 struct pci_dev *pci_dev = mdev->pci_dev; in vp_modern_probe()
225 if (mdev->device_id_check) { in vp_modern_probe()
226 devid = mdev->device_id_check(pci_dev); in vp_modern_probe()
229 mdev->id.device = devid; in vp_modern_probe()
232 if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) in vp_modern_probe()
233 return -ENODEV; in vp_modern_probe()
235 if (pci_dev->device < 0x1040) { in vp_modern_probe()
239 mdev->id.device = pci_dev->subsystem_device; in vp_modern_probe()
242 mdev->id.device = pci_dev->device - 0x1040; in vp_modern_probe()
245 mdev->id.vendor = pci_dev->subsystem_vendor; in vp_modern_probe()
250 &mdev->modern_bars); in vp_modern_probe()
252 dev_info(&pci_dev->dev, in vp_modern_probe()
254 return -ENODEV; in vp_modern_probe()
260 &mdev->modern_bars); in vp_modern_probe()
263 &mdev->modern_bars); in vp_modern_probe()
265 dev_err(&pci_dev->dev, in vp_modern_probe()
268 return -EINVAL; in vp_modern_probe()
271 err = dma_set_mask_and_coherent(&pci_dev->dev, in vp_modern_probe()
272 mdev->dma_mask ? : DMA_BIT_MASK(64)); in vp_modern_probe()
274 err = dma_set_mask_and_coherent(&pci_dev->dev, in vp_modern_probe()
277 …dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this migh… in vp_modern_probe()
280 * device-specific configuration. in vp_modern_probe()
284 &mdev->modern_bars); in vp_modern_probe()
286 err = pci_request_selected_regions(pci_dev, mdev->modern_bars, in vp_modern_probe()
287 "virtio-pci-modern"); in vp_modern_probe()
291 err = -EINVAL; in vp_modern_probe()
292 mdev->common = vp_modern_map_capability(mdev, common, in vp_modern_probe()
296 if (!mdev->common) in vp_modern_probe()
298 mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1, in vp_modern_probe()
301 if (!mdev->isr) in vp_modern_probe()
304 /* Read notify_off_multiplier from config space. */ in vp_modern_probe()
308 &mdev->notify_offset_multiplier); in vp_modern_probe()
309 /* Read notify length and offset from config space. */ in vp_modern_probe()
325 mdev->notify_base = vp_modern_map_capability(mdev, notify, in vp_modern_probe()
328 &mdev->notify_len, in vp_modern_probe()
329 &mdev->notify_pa); in vp_modern_probe()
330 if (!mdev->notify_base) in vp_modern_probe()
333 mdev->notify_map_cap = notify; in vp_modern_probe()
340 mdev->device = vp_modern_map_capability(mdev, device, 0, 4, in vp_modern_probe()
342 &mdev->device_len, in vp_modern_probe()
344 if (!mdev->device) in vp_modern_probe()
351 if (mdev->notify_base) in vp_modern_probe()
352 pci_iounmap(pci_dev, mdev->notify_base); in vp_modern_probe()
354 pci_iounmap(pci_dev, mdev->isr); in vp_modern_probe()
356 pci_iounmap(pci_dev, mdev->common); in vp_modern_probe()
358 pci_release_selected_regions(pci_dev, mdev->modern_bars); in vp_modern_probe()
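
The probe fragments above show how the virtio device id is derived from the PCI device id: transitional devices (0x1000-0x103f) carry it in the PCI subsystem device id, modern devices use 0x1040 plus the virtio device id. A hypothetical helper restating that rule (not part of the driver):

	/* Illustrative only: derive the virtio device id per the probe logic above. */
	static u16 virtio_device_id_sketch(struct pci_dev *pci_dev)
	{
		if (pci_dev->device < 0x1040)
			return pci_dev->subsystem_device;	/* transitional device */
		return pci_dev->device - 0x1040;		/* modern device */
	}
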
365 * @mdev: the modern virtio-pci device
369 struct pci_dev *pci_dev = mdev->pci_dev; in vp_modern_remove()
371 if (mdev->device) in vp_modern_remove()
372 pci_iounmap(pci_dev, mdev->device); in vp_modern_remove()
373 if (mdev->notify_base) in vp_modern_remove()
374 pci_iounmap(pci_dev, mdev->notify_base); in vp_modern_remove()
375 pci_iounmap(pci_dev, mdev->isr); in vp_modern_remove()
376 pci_iounmap(pci_dev, mdev->common); in vp_modern_remove()
377 pci_release_selected_regions(pci_dev, mdev->modern_bars); in vp_modern_remove()
382 * vp_modern_get_features - get features from device
383 * @mdev: the modern virtio-pci device
389 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_get_features() local
393 vp_iowrite32(0, &cfg->device_feature_select); in vp_modern_get_features()
394 features = vp_ioread32(&cfg->device_feature); in vp_modern_get_features()
395 vp_iowrite32(1, &cfg->device_feature_select); in vp_modern_get_features()
396 features |= ((u64)vp_ioread32(&cfg->device_feature) << 32); in vp_modern_get_features()
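
The 64-bit device feature word is read as two 32-bit halves selected through device_feature_select. A hypothetical caller that checks one well-known bit, VIRTIO_F_VERSION_1 (bit 32):

	/* Hypothetical caller: does the device offer VIRTIO_F_VERSION_1? */
	static bool device_is_modern(struct virtio_pci_modern_device *mdev)
	{
		return vp_modern_get_features(mdev) & BIT_ULL(VIRTIO_F_VERSION_1);
	}
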
403 * vp_modern_get_driver_features - get driver features from device
404 * @mdev: the modern virtio-pci device
410 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_get_driver_features() local
414 vp_iowrite32(0, &cfg->guest_feature_select); in vp_modern_get_driver_features()
415 features = vp_ioread32(&cfg->guest_feature); in vp_modern_get_driver_features()
416 vp_iowrite32(1, &cfg->guest_feature_select); in vp_modern_get_driver_features()
417 features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32); in vp_modern_get_driver_features()
424 * vp_modern_set_features - set features to device
425 * @mdev: the modern virtio-pci device
431 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_set_features() local
433 vp_iowrite32(0, &cfg->guest_feature_select); in vp_modern_set_features()
434 vp_iowrite32((u32)features, &cfg->guest_feature); in vp_modern_set_features()
435 vp_iowrite32(1, &cfg->guest_feature_select); in vp_modern_set_features()
436 vp_iowrite32(features >> 32, &cfg->guest_feature); in vp_modern_set_features()
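
Together with vp_modern_get_features(), this is the write half of feature negotiation: the driver accepts only the bits it understands and writes the result back as two 32-bit halves. A hedged example of that sequence, where driver_supported is an assumed caller-provided mask:

	/* Hypothetical negotiation step, not driver code. */
	static void negotiate_features(struct virtio_pci_modern_device *mdev,
				       u64 driver_supported)
	{
		u64 features = vp_modern_get_features(mdev);

		vp_modern_set_features(mdev, features & driver_supported);
	}
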
441 * vp_modern_generation - get the device generation in vp_modern_generation()
442 * @mdev: the modern virtio-pci device
448 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_generation() local
450 return vp_ioread8(&cfg->config_generation); in vp_modern_generation()
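
config_generation lets a driver detect a device-side config change in the middle of a multi-byte config read. A sketch of the usual retry loop; read_cfg is an assumed caller-supplied accessor, not a driver API:

	/* Sketch: re-read config until the generation is stable, so the
	 * fields are not torn by a concurrent device update.
	 */
	static void stable_config_read(struct virtio_pci_modern_device *mdev,
				       void (*read_cfg)(struct virtio_pci_modern_device *))
	{
		u32 gen;

		do {
			gen = vp_modern_generation(mdev);
			read_cfg(mdev);		/* assumed helper */
		} while (gen != vp_modern_generation(mdev));
	}
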
455 * vp_modern_get_status - get the device status
456 * @mdev: the modern virtio-pci device
462 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_get_status() local
464 return vp_ioread8(&cfg->device_status); in vp_modern_get_status()
469 * vp_modern_set_status - set status to device
470 * @mdev: the modern virtio-pci device
476 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_set_status() local
479 * Per memory-barriers.txt, wmb() is not needed to guarantee in vp_modern_set_status()
483 vp_iowrite8(status, &cfg->device_status); in vp_modern_set_status()
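
device_status carries the virtio status byte. A hypothetical init sequence using these accessors to walk the spec's ACKNOWLEDGE -> DRIVER -> FEATURES_OK -> DRIVER_OK handshake (feature negotiation elided):

	/* Hypothetical bring-up sequence, not driver code. */
	static int start_device(struct virtio_pci_modern_device *mdev)
	{
		u8 status = 0;

		vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_ACKNOWLEDGE);
		vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_DRIVER);
		/* ... feature negotiation goes here ... */
		vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_FEATURES_OK);
		if (!(vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK))
			return -ENODEV;	/* device rejected the feature set */
		vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_DRIVER_OK);
		return 0;
	}
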
488 * vp_modern_get_queue_reset - get the queue reset status
489 * @mdev: the modern virtio-pci device
494 struct virtio_pci_modern_common_cfg __iomem *cfg; in vp_modern_get_queue_reset() local
496 cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common; in vp_modern_get_queue_reset()
498 vp_iowrite16(index, &cfg->cfg.queue_select); in vp_modern_get_queue_reset()
499 return vp_ioread16(&cfg->queue_reset); in vp_modern_get_queue_reset()
504 * vp_modern_set_queue_reset - reset the queue
505 * @mdev: the modern virtio-pci device
510 struct virtio_pci_modern_common_cfg __iomem *cfg; in vp_modern_set_queue_reset() local
512 cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common; in vp_modern_set_queue_reset()
514 vp_iowrite16(index, &cfg->cfg.queue_select); in vp_modern_set_queue_reset()
515 vp_iowrite16(1, &cfg->queue_reset); in vp_modern_set_queue_reset()
517 while (vp_ioread16(&cfg->queue_reset)) in vp_modern_set_queue_reset()
520 while (vp_ioread16(&cfg->cfg.queue_enable)) in vp_modern_set_queue_reset()
526 * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
527 * @mdev: the modern virtio-pci device
536 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_queue_vector() local
538 vp_iowrite16(index, &cfg->queue_select); in vp_modern_queue_vector()
539 vp_iowrite16(vector, &cfg->queue_msix_vector); in vp_modern_queue_vector()
541 return vp_ioread16(&cfg->queue_msix_vector); in vp_modern_queue_vector()
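
The device acknowledges a vector assignment by echoing it back; VIRTIO_MSI_NO_VECTOR means it could not allocate one. A hypothetical wrapper that turns that into an error code:

	/* Hypothetical use of vp_modern_queue_vector(). */
	static int assign_vq_vector(struct virtio_pci_modern_device *mdev,
				    u16 index, u16 vector)
	{
		if (vp_modern_queue_vector(mdev, index, vector) == VIRTIO_MSI_NO_VECTOR)
			return -EBUSY;	/* device has no resources for this vector */
		return 0;
	}
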
546 * vp_modern_config_vector - set the vector for config interrupt
547 * @mdev: the modern virtio-pci device
555 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_config_vector() local
558 vp_iowrite16(vector, &cfg->msix_config); in vp_modern_config_vector()
561 return vp_ioread16(&cfg->msix_config); in vp_modern_config_vector()
566 * vp_modern_queue_address - set the virtqueue address
567 * @mdev: the modern virtio-pci device
577 struct virtio_pci_common_cfg __iomem *cfg = mdev->common; in vp_modern_queue_address() local
579 vp_iowrite16(index, &cfg->queue_select); in vp_modern_queue_address()
581 vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo, in vp_modern_queue_address()
582 &cfg->queue_desc_hi); in vp_modern_queue_address()
583 vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo, in vp_modern_queue_address()
584 &cfg->queue_avail_hi); in vp_modern_queue_address()
585 vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo, in vp_modern_queue_address()
586 &cfg->queue_used_hi); in vp_modern_queue_address()
591 * vp_modern_set_queue_enable - enable a virtqueue
592 * @mdev: the modern virtio-pci device
599 vp_iowrite16(index, &mdev->common->queue_select); in vp_modern_set_queue_enable()
600 vp_iowrite16(enable, &mdev->common->queue_enable); in vp_modern_set_queue_enable()
605 * vp_modern_get_queue_enable - get the enable status of a virtqueue in vp_modern_get_queue_enable()
606 * @mdev: the modern virtio-pci device
614 vp_iowrite16(index, &mdev->common->queue_select); in vp_modern_get_queue_enable()
616 return vp_ioread16(&mdev->common->queue_enable); in vp_modern_get_queue_enable()
621 * vp_modern_set_queue_size - set size for a virtqueue
622 * @mdev: the modern virtio-pci device
629 vp_iowrite16(index, &mdev->common->queue_select); in vp_modern_set_queue_size()
630 vp_iowrite16(size, &mdev->common->queue_size); in vp_modern_set_queue_size()
636 * vp_modern_get_queue_size - get size for a virtqueue
637 * @mdev: the modern virtio-pci device
645 vp_iowrite16(index, &mdev->common->queue_select); in vp_modern_get_queue_size()
647 return vp_ioread16(&mdev->common->queue_size); in vp_modern_get_queue_size()
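
A hypothetical bring-up of one virtqueue using the accessors listed above: program the ring size and the three ring addresses, then enable the queue. desc/avail/used are assumed DMA addresses the caller has already allocated:

	/* Illustrative sequence, not driver code. */
	static void setup_vq_sketch(struct virtio_pci_modern_device *mdev, u16 index,
				    u16 num, u64 desc, u64 avail, u64 used)
	{
		vp_modern_set_queue_size(mdev, index, num);
		vp_modern_queue_address(mdev, index, desc, avail, used);
		vp_modern_set_queue_enable(mdev, index, true);
	}
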
653 * vp_modern_get_num_queues - get the number of virtqueues
654 * @mdev: the modern virtio-pci device
660 return vp_ioread16(&mdev->common->num_queues); in vp_modern_get_num_queues()
665 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
666 * @mdev: the modern virtio-pci device
674 vp_iowrite16(index, &mdev->common->queue_select); in vp_modern_get_queue_notify_off()
676 return vp_ioread16(&mdev->common->queue_notify_off); in vp_modern_get_queue_notify_off()
680 * vp_modern_map_vq_notify - map notification area for a virtqueue in vp_modern_map_vq_notify()
682 * @mdev: the modern virtio-pci device
693 if (mdev->notify_base) { in vp_modern_map_vq_notify()
695 if ((u64)off * mdev->notify_offset_multiplier + 2 in vp_modern_map_vq_notify()
696 > mdev->notify_len) { in vp_modern_map_vq_notify()
697 dev_warn(&mdev->pci_dev->dev, in vp_modern_map_vq_notify()
700 off, mdev->notify_offset_multiplier, in vp_modern_map_vq_notify()
701 index, mdev->notify_len); in vp_modern_map_vq_notify()
705 *pa = mdev->notify_pa + in vp_modern_map_vq_notify()
706 off * mdev->notify_offset_multiplier; in vp_modern_map_vq_notify()
707 return mdev->notify_base + off * mdev->notify_offset_multiplier; in vp_modern_map_vq_notify()
710 mdev->notify_map_cap, 2, 2, in vp_modern_map_vq_notify()
711 off * mdev->notify_offset_multiplier, 2, in vp_modern_map_vq_notify()
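
The address returned by vp_modern_map_vq_notify() (notify_base plus the per-queue notify offset times notify_offset_multiplier, as computed above) is where the driver kicks the queue. A minimal sketch of that kick, ignoring VIRTIO_F_NOTIFICATION_DATA:

	/* Hypothetical kick path: write the virtqueue index to its notify address. */
	static void kick_vq_sketch(void __iomem *notify_addr, u16 index)
	{
		vp_iowrite16(index, notify_addr);
	}
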