// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Functions for creating a virtual configuration space for
 *               exported PCI Devices.
 *               It's dangerous to allow PCI Driver Domains to change their
 *               device's resources (memory, i/o ports, interrupts). We need to
 *               restrict changes to certain PCI Configuration registers:
 *               BARs, INTERRUPT_PIN, most registers in the header...
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define dev_fmt(fmt) DRV_NAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"

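/*
 * Module-wide permissive flag: when set (or when a device's own permissive
 * flag is set), writes to config space offsets not covered by a virtualized
 * field are passed straight through to the device (see
 * xen_pcibk_config_write()).
 */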
bool xen_pcibk_permissive;
module_param_named(permissive, xen_pcibk_permissive, bool, 0644);

/* This is where xen_pcibk_{read,write}_config_{byte,word,dword}
 * are created. */
#define DEFINE_PCI_CONFIG(op, size, type)			\
int xen_pcibk_##op##_config_##size				\
(struct pci_dev *dev, int offset, type value, void *data)	\
{								\
	return pci_##op##_config_##size(dev, offset, value);	\
}

DEFINE_PCI_CONFIG(read, byte, u8 *)
DEFINE_PCI_CONFIG(read, word, u16 *)
DEFINE_PCI_CONFIG(read, dword, u32 *)

DEFINE_PCI_CONFIG(write, byte, u8)
DEFINE_PCI_CONFIG(write, word, u16)
DEFINE_PCI_CONFIG(write, dword, u32)

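/*
 * Dispatch a read of a virtualized config field to the field's size-specific
 * read handler, if one is registered.  Unhandled fields report 0.
 */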
static int conf_space_read(struct pci_dev *dev,
			   const struct config_field_entry *entry,
			   int offset, u32 *value)
{
	int ret = 0;
	const struct config_field *field = entry->field;

	*value = 0;

	switch (field->size) {
	case 1:
		if (field->u.b.read)
			ret = field->u.b.read(dev, offset, (u8 *) value,
					      entry->data);
		break;
	case 2:
		if (field->u.w.read)
			ret = field->u.w.read(dev, offset, (u16 *) value,
					      entry->data);
		break;
	case 4:
		if (field->u.dw.read)
			ret = field->u.dw.read(dev, offset, value, entry->data);
		break;
	}
	return ret;
}

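/*
 * Dispatch a write to a virtualized config field to the field's size-specific
 * write handler, if one is registered.  Fields without a write handler
 * silently discard the value.
 */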
static int conf_space_write(struct pci_dev *dev,
			    const struct config_field_entry *entry,
			    int offset, u32 value)
{
	int ret = 0;
	const struct config_field *field = entry->field;

	switch (field->size) {
	case 1:
		if (field->u.b.write)
			ret = field->u.b.write(dev, offset, (u8) value,
					       entry->data);
		break;
	case 2:
		if (field->u.w.write)
			ret = field->u.w.write(dev, offset, (u16) value,
					       entry->data);
		break;
	case 4:
		if (field->u.dw.write)
			ret = field->u.dw.write(dev, offset, value,
						entry->data);
		break;
	}
	return ret;
}

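/* Bit mask covering an access of the given size in bytes (1, 2 or 4). */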
static inline u32 get_mask(int size)
{
	if (size == 1)
		return 0xff;
	else if (size == 2)
		return 0xffff;
	else
		return 0xffffffff;
}

static inline int valid_request(int offset, int size)
{
	/* Validate request (no un-aligned requests) */
	if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
		return 1;
	return 0;
}

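/*
 * Overlay new_val (restricted to new_val_mask) onto val, shifted by offset
 * bytes (up for a positive offset, down for a negative one).  For example,
 * merge_value(0x11223344, 0xab, 0xff, 2) == 0x11ab3344.
 */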
static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
			      int offset)
{
	if (offset >= 0) {
		new_val_mask <<= (offset * 8);
		new_val <<= (offset * 8);
	} else {
		new_val_mask >>= (offset * -8);
		new_val >>= (offset * -8);
	}
	val = (val & ~new_val_mask) | (new_val & new_val_mask);

	return val;
}

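/*
 * Map PCIBIOS_* return codes from the PCI core onto the XEN_PCI_ERR_* codes
 * used by pciback; unrecognized values are passed through unchanged.
 */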
static int xen_pcibios_err_to_errno(int err)
{
	switch (err) {
	case PCIBIOS_SUCCESSFUL:
		return XEN_PCI_ERR_success;
	case PCIBIOS_DEVICE_NOT_FOUND:
		return XEN_PCI_ERR_dev_not_found;
	case PCIBIOS_BAD_REGISTER_NUMBER:
		return XEN_PCI_ERR_invalid_offset;
	case PCIBIOS_FUNC_NOT_SUPPORTED:
		return XEN_PCI_ERR_not_implemented;
	case PCIBIOS_SET_FAILED:
		return XEN_PCI_ERR_access_denied;
	}
	return err;
}

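/*
 * Handle a config space read from the driver domain: fetch the raw value
 * from the device first, then overlay every virtualized field that overlaps
 * the requested range.
 */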
int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
			  u32 *ret_val)
{
	int err = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	const struct config_field_entry *cfg_entry;
	const struct config_field *field;
	int field_start, field_end;
	/* if read fails for any reason, return 0
	 * (as if device didn't respond) */
	u32 value = 0, tmp_val;

	dev_dbg(&dev->dev, "read %d bytes at 0x%x\n", size, offset);

	if (!valid_request(offset, size)) {
		err = XEN_PCI_ERR_invalid_offset;
		goto out;
	}

	/* Get the real value first, then modify as appropriate */
	switch (size) {
	case 1:
		err = pci_read_config_byte(dev, offset, (u8 *) &value);
		break;
	case 2:
		err = pci_read_config_word(dev, offset, (u16 *) &value);
		break;
	case 4:
		err = pci_read_config_dword(dev, offset, &value);
		break;
	}

	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		field_start = OFFSET(cfg_entry);
		field_end = OFFSET(cfg_entry) + field->size;

		if (offset + size > field_start && field_end > offset) {
			err = conf_space_read(dev, cfg_entry, field_start,
					      &tmp_val);
			if (err)
				goto out;

			value = merge_value(value, tmp_val,
					    get_mask(field->size),
					    field_start - offset);
		}
	}

out:
	dev_dbg(&dev->dev, "read %d bytes at 0x%x = %x\n", size, offset, value);

	*ret_val = value;
	return xen_pcibios_err_to_errno(err);
}

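/*
 * Handle a config space write from the driver domain: writes overlapping a
 * virtualized field go through that field's handler (read, merge, write
 * back); everything else is dropped unless permissive mode is enabled.
 */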
int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
{
	int err = 0, handled = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	const struct config_field_entry *cfg_entry;
	const struct config_field *field;
	u32 tmp_val;
	int field_start, field_end;

	dev_dbg(&dev->dev, "write request %d bytes at 0x%x = %x\n",
		size, offset, value);

	if (!valid_request(offset, size))
		return XEN_PCI_ERR_invalid_offset;

	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		field_start = OFFSET(cfg_entry);
		field_end = OFFSET(cfg_entry) + field->size;

		if (offset + size > field_start && field_end > offset) {
			err = conf_space_read(dev, cfg_entry, field_start,
					      &tmp_val);
			if (err)
				break;

			tmp_val = merge_value(tmp_val, value, get_mask(size),
					      offset - field_start);

			err = conf_space_write(dev, cfg_entry, field_start,
					       tmp_val);

			/* handled is set true here, but not every byte
			 * may have been written! Properly detecting if
			 * every byte is handled is unnecessary as the
			 * flag is used to detect devices that need
			 * special helpers to work correctly.
			 */
			handled = 1;
		}
	}

	if (!handled && !err) {
		/* By default, anything not specifically handled above is
		 * read-only. The permissive flag changes this behavior so
		 * that anything not specifically handled above is writable.
		 * This means that some fields may still be read-only because
		 * they have entries in the config_field list that intercept
		 * the write and do nothing. */
		if (dev_data->permissive || xen_pcibk_permissive) {
			switch (size) {
			case 1:
				err = pci_write_config_byte(dev, offset,
							    (u8) value);
				break;
			case 2:
				err = pci_write_config_word(dev, offset,
							    (u16) value);
				break;
			case 4:
				err = pci_write_config_dword(dev, offset,
							     (u32) value);
				break;
			}
		} else if (!dev_data->warned_on_write) {
			dev_data->warned_on_write = 1;
			dev_warn(&dev->dev, "Driver tried to write to a "
				 "read-only configuration space field at offset"
				 " 0x%x, size %d. This may be harmless, but if "
				 "you have problems with your device:\n"
				 "1) see permissive attribute in sysfs\n"
				 "2) report problems to the xen-devel "
				 "mailing list along with details of your "
				 "device obtained from lspci.\n", offset, size);
		}
	}

	return xen_pcibios_err_to_errno(err);
}

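/*
 * Report which interrupt delivery mechanisms (MSI, MSI-X, INTx) are currently
 * enabled on the device, reading the config space directly rather than
 * trusting cached kernel state.
 */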
int xen_pcibk_get_interrupt_type(struct pci_dev *dev)
{
	int err;
	u16 val;
	int ret = 0;

	/*
	 * Do not trust dev->msi(x)_enabled here, as enabling could have been
	 * done by qemu, bypassing the pci_*msi* functions.
	 */
	if (dev->msi_cap) {
		err = pci_read_config_word(dev,
				dev->msi_cap + PCI_MSI_FLAGS,
				&val);
		if (err)
			return err;
		if (val & PCI_MSI_FLAGS_ENABLE)
			ret |= INTERRUPT_TYPE_MSI;
	}
	if (dev->msix_cap) {
		err = pci_read_config_word(dev,
				dev->msix_cap + PCI_MSIX_FLAGS,
				&val);
		if (err)
			return err;
		if (val & PCI_MSIX_FLAGS_ENABLE)
			ret |= INTERRUPT_TYPE_MSIX;
	}

	/*
	 * PCIe spec says device cannot use INTx if MSI/MSI-X is enabled,
	 * so check for INTx only when both are disabled.
	 */
	if (!ret) {
		err = pci_read_config_word(dev, PCI_COMMAND, &val);
		if (err)
			return err;
		if (!(val & PCI_COMMAND_INTX_DISABLE))
			ret |= INTERRUPT_TYPE_INTX;
	}

	return ret ?: INTERRUPT_TYPE_NONE;
}

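/*
 * Free only the dynamically allocated config field entries, i.e. those that
 * provide a clean() hook.
 */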
void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	struct config_field_entry *cfg_entry, *t;
	const struct config_field *field;

	dev_dbg(&dev->dev, "freeing dynamically allocated virtual "
			   "configuration space fields\n");
	if (!dev_data)
		return;

	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		if (field->clean) {
			field->clean((struct config_field *)field);

			kfree(cfg_entry->data);

			list_del(&cfg_entry->list);
			kfree(cfg_entry);
		}
	}
}

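/* Run the reset() hook of every virtualized field on this device. */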
void xen_pcibk_config_reset_dev(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	const struct config_field_entry *cfg_entry;
	const struct config_field *field;

	dev_dbg(&dev->dev, "resetting virtual configuration space\n");
	if (!dev_data)
		return;

	list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
		field = cfg_entry->field;

		if (field->reset)
			field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
	}
}

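/*
 * Tear down the entire virtual configuration space of a device, letting each
 * field release its per-entry data first.
 */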
void xen_pcibk_config_free_dev(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	struct config_field_entry *cfg_entry, *t;
	const struct config_field *field;

	dev_dbg(&dev->dev, "freeing virtual configuration space fields\n");
	if (!dev_data)
		return;

	list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
		list_del(&cfg_entry->list);

		field = cfg_entry->field;

		if (field->release)
			field->release(dev, OFFSET(cfg_entry), cfg_entry->data);

		kfree(cfg_entry);
	}
}

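/*
 * Register a config field at base_offset for this device, running the field's
 * init() hook (if any) to allocate per-entry data.  A field duplicating an
 * already-registered offset is not added.
 */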
int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
				    const struct config_field *field,
				    unsigned int base_offset)
{
	int err = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
	struct config_field_entry *cfg_entry;
	void *tmp;

	cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
	if (!cfg_entry) {
		err = -ENOMEM;
		goto out;
	}

	cfg_entry->data = NULL;
	cfg_entry->field = field;
	cfg_entry->base_offset = base_offset;

	/* silently ignore duplicate fields */
	err = xen_pcibk_field_is_dup(dev, OFFSET(cfg_entry));
	if (err)
		goto out;

	if (field->init) {
		tmp = field->init(dev, OFFSET(cfg_entry));

		if (IS_ERR(tmp)) {
			err = PTR_ERR(tmp);
			goto out;
		}

		cfg_entry->data = tmp;
	}

	dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
		OFFSET(cfg_entry));
	list_add_tail(&cfg_entry->list, &dev_data->config_fields);

out:
	if (err)
		kfree(cfg_entry);

	return err;
}

/* This sets up the device's virtual configuration space to keep track of
 * certain registers (like the base address registers (BARs)) so that we can
 * keep the client from manipulating them directly.
 */
int xen_pcibk_config_init_dev(struct pci_dev *dev)
{
	int err = 0;
	struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "initializing virtual configuration space\n");

	INIT_LIST_HEAD(&dev_data->config_fields);

	err = xen_pcibk_config_header_add_fields(dev);
	if (err)
		goto out;

	err = xen_pcibk_config_capability_add_fields(dev);
	if (err)
		goto out;

	err = xen_pcibk_config_quirks_init(dev);

out:
	return err;
}

int xen_pcibk_config_init(void)
{
	return xen_pcibk_config_capability_init();
}