/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/vfio.h>
#include <linux/irqbypass.h>
#include <linux/types.h>
#include <linux/uuid.h>
#include <linux/notifier.h>

#ifndef VFIO_PCI_CORE_H
#define VFIO_PCI_CORE_H

#define VFIO_PCI_OFFSET_SHIFT   40

#define VFIO_PCI_OFFSET_TO_INDEX(off)	(off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

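/*
 * Example: the device fd multiplexes all regions through one file offset
 * space.  With VFIO_PCI_OFFSET_SHIFT == 40, a read at file offset
 * (2ULL << 40) + 0x10 targets byte 0x10 of the region with index 2
 * (VFIO_PCI_BAR2_REGION_INDEX); VFIO_PCI_OFFSET_MASK selects the byte
 * offset within a region from the low 40 bits.
 */
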
/* Special capability IDs predefined access */
#define PCI_CAP_ID_INVALID		0xFF	/* default raw access */
#define PCI_CAP_ID_INVALID_VIRT		0xFE	/* default virt access */

/* Cap maximum number of ioeventfds per device (arbitrary) */
#define VFIO_PCI_IOEVENTFD_MAX		1000

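/*
 * One entry per ioeventfd registered through VFIO_DEVICE_IOEVENTFD:
 * signaling the eventfd triggers a @count byte write of @data at offset
 * @pos in BAR @bar on behalf of user space.
 */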
struct vfio_pci_ioeventfd {
	struct list_head	next;
	struct vfio_pci_core_device	*vdev;
	struct virqfd		*virqfd;
	void __iomem		*addr;
	uint64_t		data;
	loff_t			pos;
	int			bar;
	int			count;
	bool			test_mem;
};

struct vfio_pci_irq_ctx {
	struct eventfd_ctx	*trigger;
	struct virqfd		*unmask;
	struct virqfd		*mask;
	char			*name;
	bool			masked;
	struct irq_bypass_producer	producer;
};

struct vfio_pci_core_device;
struct vfio_pci_region;

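/*
 * Callbacks backing a device specific region added with
 * vfio_pci_register_dev_region().  Accesses to that region's index on
 * the device fd are forwarded to @rw and @mmap, @release is invoked
 * when the device is disabled, and @add_capability may attach region
 * capabilities reported through VFIO_DEVICE_GET_REGION_INFO.
 */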
struct vfio_pci_regops {
	ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
		      size_t count, loff_t *ppos, bool iswrite);
	void	(*release)(struct vfio_pci_core_device *vdev,
			   struct vfio_pci_region *region);
	int	(*mmap)(struct vfio_pci_core_device *vdev,
			struct vfio_pci_region *region,
			struct vm_area_struct *vma);
	int	(*add_capability)(struct vfio_pci_core_device *vdev,
				  struct vfio_pci_region *region,
				  struct vfio_info_cap *caps);
};

struct vfio_pci_region {
	u32				type;
	u32				subtype;
	const struct vfio_pci_regops	*ops;
	void				*data;
	size_t				size;
	u32				flags;
};

struct vfio_pci_dummy_resource {
	struct resource		resource;
	int			index;
	struct list_head	res_next;
};

struct vfio_pci_vf_token {
	struct mutex		lock;
	uuid_t			uuid;
	int			users;
};

struct vfio_pci_mmap_vma {
	struct vm_area_struct	*vma;
	struct list_head	vma_next;
};

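/*
 * Per-device state shared by vfio-pci style drivers.  Variant drivers
 * typically embed this structure in their own per-device structure and
 * recover it with container_of() on the embedded struct vfio_device
 * passed to their vfio_device_ops callbacks.
 */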
struct vfio_pci_core_device {
	struct vfio_device	vdev;
	struct pci_dev		*pdev;
	void __iomem		*barmap[PCI_STD_NUM_BARS];
	bool			bar_mmap_supported[PCI_STD_NUM_BARS];
	u8			*pci_config_map;
	u8			*vconfig;
	struct perm_bits	*msi_perm;
	spinlock_t		irqlock;
	struct mutex		igate;
	struct vfio_pci_irq_ctx	*ctx;
	int			num_ctx;
	int			irq_type;
	int			num_regions;
	struct vfio_pci_region	*region;
	u8			msi_qmax;
	u8			msix_bar;
	u16			msix_size;
	u32			msix_offset;
	u32			rbar[7];
	bool			pci_2_3;
	bool			virq_disabled;
	bool			reset_works;
	bool			extended_caps;
	bool			bardirty;
	bool			has_vga;
	bool			needs_reset;
	bool			nointx;
	bool			needs_pm_restore;
	struct pci_saved_state	*pci_saved_state;
	struct pci_saved_state	*pm_save;
	int			ioeventfds_nr;
	struct eventfd_ctx	*err_trigger;
	struct eventfd_ctx	*req_trigger;
	struct list_head	dummy_resources_list;
	struct mutex		ioeventfds_lock;
	struct list_head	ioeventfds_list;
	struct vfio_pci_vf_token	*vf_token;
	struct list_head		sriov_pfs_item;
	struct vfio_pci_core_device	*sriov_pf_core_dev;
	struct notifier_block	nb;
	struct mutex		vma_lock;
	struct list_head	vma_list;
	struct rw_semaphore	memory_lock;
};

#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX)
#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX)
#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev)))
#define irq_is(vdev, type) (vdev->irq_type == type)
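/* e.g. irq_is(vdev, VFIO_PCI_MSIX_IRQ_INDEX) is equivalent to is_msix(vdev) */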

extern void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev);
extern void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev);

extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev,
				   uint32_t flags, unsigned index,
				   unsigned start, unsigned count, void *data);

extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev,
				  char __user *buf, size_t count,
				  loff_t *ppos, bool iswrite);

extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			       size_t count, loff_t *ppos, bool iswrite);

#ifdef CONFIG_VFIO_PCI_VGA
extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf,
			       size_t count, loff_t *ppos, bool iswrite);
#else
static inline ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev,
				      char __user *buf, size_t count,
				      loff_t *ppos, bool iswrite)
{
	return -EINVAL;
}
#endif

extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset,
			       uint64_t data, int count, int fd);

extern int vfio_pci_init_perm_bits(void);
extern void vfio_pci_uninit_perm_bits(void);

extern int vfio_config_init(struct vfio_pci_core_device *vdev);
extern void vfio_config_free(struct vfio_pci_core_device *vdev);

extern int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev,
					unsigned int type, unsigned int subtype,
					const struct vfio_pci_regops *ops,
					size_t size, u32 flags, void *data);

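/*
 * Illustrative sketch only: a driver exposing a vendor specific region
 * might register it roughly as below (vendor_regops, vendor_data, size
 * and the PCI_VENDOR_ID_XXX/subtype values are hypothetical placeholders):
 *
 *	ret = vfio_pci_register_dev_region(vdev,
 *			VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_XXX,
 *			0x1, &vendor_regops, size,
 *			VFIO_REGION_INFO_FLAG_READ, vendor_data);
 */
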
extern int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev,
				    pci_power_t state);

extern bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device
						    *vdev);
extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev);
extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev,
					       u16 cmd);

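/*
 * Typical pairing when touching BAR space that depends on memory decode
 * being enabled (sketch only; val, bar and off are placeholder variables):
 *
 *	u16 cmd = vfio_pci_memory_lock_and_enable(vdev);
 *	writel(val, vdev->barmap[bar] + off);
 *	vfio_pci_memory_unlock_and_restore(vdev, cmd);
 */
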
#ifdef CONFIG_VFIO_PCI_IGD
extern int vfio_pci_igd_init(struct vfio_pci_core_device *vdev);
#else
static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
	return -ENODEV;
}
#endif

#ifdef CONFIG_S390
extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
				       struct vfio_info_cap *caps);
#else
static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev,
					      struct vfio_info_cap *caps)
{
	return -ENODEV;
}
#endif

/* Exported for use by vfio-pci variant drivers */
void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
			      bool is_disable_idle_d3);
void vfio_pci_core_close_device(struct vfio_device *core_vdev);
void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev,
			       struct pci_dev *pdev,
			       const struct vfio_device_ops *vfio_pci_ops);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn);
extern const struct pci_error_handlers vfio_pci_core_err_handlers;
long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
		unsigned long arg);
int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
				void __user *arg, size_t argsz);
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
		size_t count, loff_t *ppos);
ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
		size_t count, loff_t *ppos);
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state);

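/*
 * Rough usage sketch for a vfio-pci variant driver (names such as
 * my_vfio_pci_ops, my_open_device and my_probe are hypothetical; error
 * handling is abbreviated):
 *
 *	static const struct vfio_device_ops my_vfio_pci_ops = {
 *		.name		= "my-vfio-pci",
 *		.open_device	= my_open_device,
 *		.close_device	= vfio_pci_core_close_device,
 *		.ioctl		= vfio_pci_core_ioctl,
 *		.device_feature	= vfio_pci_core_ioctl_feature,
 *		.read		= vfio_pci_core_read,
 *		.write		= vfio_pci_core_write,
 *		.mmap		= vfio_pci_core_mmap,
 *		.request	= vfio_pci_core_request,
 *		.match		= vfio_pci_core_match,
 *	};
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct vfio_pci_core_device *vdev;
 *		int ret;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		if (!vdev)
 *			return -ENOMEM;
 *
 *		vfio_pci_core_init_device(vdev, pdev, &my_vfio_pci_ops);
 *		dev_set_drvdata(&pdev->dev, vdev);
 *		ret = vfio_pci_core_register_device(vdev);
 *		if (ret) {
 *			vfio_pci_core_uninit_device(vdev);
 *			kfree(vdev);
 *			return ret;
 *		}
 *		return 0;
 *	}
 *
 * A typical open_device callback calls vfio_pci_core_enable(), performs
 * any device specific setup, and finishes with
 * vfio_pci_core_finish_enable().
 */
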
static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

#endif /* VFIO_PCI_CORE_H */