// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Backend - Handles the virtual fields in the configuration space headers.
 *
 * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define dev_fmt pr_fmt

#include <linux/kernel.h>
#include <linux/pci.h>
#include "pciback.h"
#include "conf_space.h"

struct pci_cmd_info {
	u16 val;
};

struct pci_bar_info {
	u32 val;
	u32 len_val;
	int which;
};

#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)

/* Bits guests are allowed to control in permissive mode. */
#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \
			   PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \
			   PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK)

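/*
 * Snapshot the device's PCI_COMMAND value; command_read()/command_write()
 * use it as the guest-visible copy of the bits the guest may not drive
 * directly.
 */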
static void *command_init(struct pci_dev *dev, int offset)
{
	struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
	int err;

	if (!cmd)
		return ERR_PTR(-ENOMEM);

	err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val);
	if (err) {
		kfree(cmd);
		return ERR_PTR(err);
	}

	return cmd;
}

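/*
 * Only the PCI_COMMAND_GUEST bits are read back from hardware; all other
 * bits are reported from the cached command value.
 */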
static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
{
	int ret = pci_read_config_word(dev, offset, value);
	const struct pci_cmd_info *cmd = data;

	*value &= PCI_COMMAND_GUEST;
	*value |= cmd->val & ~PCI_COMMAND_GUEST;

	return ret;
}

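/*
 * Apply the side effects of a guest command-register write in the backend
 * (device enable/disable, bus mastering, MWI, and INTx masking when
 * allowed), then cache the value.  The register itself is written through,
 * and only for the PCI_COMMAND_GUEST bits, when permissive mode is enabled
 * globally or for this device.
 */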
static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
{
	struct xen_pcibk_dev_data *dev_data;
	int err;
	u16 val;
	struct pci_cmd_info *cmd = data;

	dev_data = pci_get_drvdata(dev);
	if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
		dev_dbg(&dev->dev, "enable\n");
		err = pci_enable_device(dev);
		if (err)
			return err;
		if (dev_data)
			dev_data->enable_intx = 1;
	} else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
		dev_dbg(&dev->dev, "disable\n");
		pci_disable_device(dev);
		if (dev_data)
			dev_data->enable_intx = 0;
	}

	if (!dev->is_busmaster && is_master_cmd(value)) {
		dev_dbg(&dev->dev, "set bus master\n");
		pci_set_master(dev);
	} else if (dev->is_busmaster && !is_master_cmd(value)) {
		dev_dbg(&dev->dev, "clear bus master\n");
		pci_clear_master(dev);
	}

	if (!(cmd->val & PCI_COMMAND_INVALIDATE) &&
	    (value & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enable memory-write-invalidate\n");
		err = pci_set_mwi(dev);
		if (err) {
			dev_warn(&dev->dev, "cannot enable memory-write-invalidate (%d)\n",
				err);
			value &= ~PCI_COMMAND_INVALIDATE;
		}
	} else if ((cmd->val & PCI_COMMAND_INVALIDATE) &&
		   !(value & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "disable memory-write-invalidate\n");
		pci_clear_mwi(dev);
	}

	if (dev_data && dev_data->allow_interrupt_control &&
	    ((cmd->val ^ value) & PCI_COMMAND_INTX_DISABLE))
		pci_intx(dev, !(value & PCI_COMMAND_INTX_DISABLE));

	cmd->val = value;

	if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
		return 0;

	/* Only allow the guest to control certain bits. */
	err = pci_read_config_word(dev, offset, &val);
	if (err || val == value)
		return err;

	value &= PCI_COMMAND_GUEST;
	value |= val & ~PCI_COMMAND_GUEST;

	return pci_write_config_word(dev, offset, value);
}

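/*
 * An all-ones write (within the ROM address mask) is a sizing request and
 * switches reads to return the length; any other value is only forwarded
 * to hardware when it restores the original ROM BAR contents.
 */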
static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
{
	struct pci_bar_info *bar = data;

	if (unlikely(!bar)) {
		dev_warn(&dev->dev, "driver data not found\n");
		return XEN_PCI_ERR_op_failed;
	}

	/* A write to obtain the length must happen as a 32-bit write.
	 * This does not (yet) support writing individual bytes
	 */
	if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
		bar->which = 1;
	else {
		u32 tmpval;
		pci_read_config_dword(dev, offset, &tmpval);
		if (tmpval != bar->val && value == bar->val) {
			/* Allow restoration of bar value. */
			pci_write_config_dword(dev, offset, bar->val);
		}
		bar->which = 0;
	}

	/* Do we need to support enabling/disabling the rom address here? */

	return 0;
}

/* For the BARs, only allow writes which write ~0 or
 * the correct resource information
 * (Needed for when the driver probes the resource usage)
 */
static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
{
	struct pci_bar_info *bar = data;
	unsigned int pos = (offset - PCI_BASE_ADDRESS_0) / 4;
	const struct resource *res = dev->resource;
	u32 mask;

	if (unlikely(!bar)) {
		dev_warn(&dev->dev, "driver data not found\n");
		return XEN_PCI_ERR_op_failed;
	}

	/* A write to obtain the length must happen as a 32-bit write.
	 * This does not (yet) support writing individual bytes
	 */
	if (res[pos].flags & IORESOURCE_IO)
		mask = ~PCI_BASE_ADDRESS_IO_MASK;
	else if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64))
		mask = 0;
	else
		mask = ~PCI_BASE_ADDRESS_MEM_MASK;
	if ((value | mask) == ~0U)
		bar->which = 1;
	else {
		u32 tmpval;
		pci_read_config_dword(dev, offset, &tmpval);
		if (tmpval != bar->val && value == bar->val) {
			/* Allow restoration of bar value. */
			pci_write_config_dword(dev, offset, bar->val);
		}
		bar->which = 0;
	}

	return 0;
}

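/* Return the sizing (length) value or the virtual BAR value, depending on
 * whether the last write was a sizing request.
 */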
static int bar_read(struct pci_dev *dev, int offset, u32 *value, void *data)
{
	struct pci_bar_info *bar = data;

	if (unlikely(!bar)) {
		dev_warn(&dev->dev, "driver data not found\n");
		return XEN_PCI_ERR_op_failed;
	}

	*value = bar->which ? bar->len_val : bar->val;

	return 0;
}

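/*
 * Seed the virtual BAR from the resource start/flags and precompute the
 * sizing value.  The register following a 64-bit memory BAR holds the
 * upper 32 bits of the address and size.
 */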
static void *bar_init(struct pci_dev *dev, int offset)
{
	unsigned int pos;
	const struct resource *res = dev->resource;
	struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);

	if (!bar)
		return ERR_PTR(-ENOMEM);

	if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
		pos = PCI_ROM_RESOURCE;
	else {
		pos = (offset - PCI_BASE_ADDRESS_0) / 4;
		if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64)) {
			/*
			 * Use ">> 16 >> 16" instead of direct ">> 32" shift
			 * to avoid warnings on 32-bit architectures.
			 */
			bar->val = res[pos - 1].start >> 16 >> 16;
			bar->len_val = -resource_size(&res[pos - 1]) >> 16 >> 16;
			return bar;
		}
	}

	if (!res[pos].flags ||
	    (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
			       IORESOURCE_BUSY)))
		return bar;

	bar->val = res[pos].start |
		   (res[pos].flags & PCI_REGION_FLAG_MASK);
	bar->len_val = -resource_size(&res[pos]) |
		       (res[pos].flags & PCI_REGION_FLAG_MASK);

	return bar;
}

static void bar_reset(struct pci_dev *dev, int offset, void *data)
{
	struct pci_bar_info *bar = data;

	bar->which = 0;
}

static void bar_release(struct pci_dev *dev, int offset, void *data)
{
	kfree(data);
}

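/* Vendor and device IDs are served from the values cached in struct pci_dev. */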
static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset,
			       u16 *value, void *data)
{
	*value = dev->vendor;

	return 0;
}

static int xen_pcibk_read_device(struct pci_dev *dev, int offset,
			       u16 *value, void *data)
{
	*value = dev->device;

	return 0;
}

static int interrupt_read(struct pci_dev *dev, int offset, u8 *value,
			  void *data)
{
	*value = (u8) dev->irq;

	return 0;
}

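/*
 * Allow writes that either leave the non-start bits of PCI_BIST unchanged
 * or write exactly PCI_BIST_START; everything else is dropped.
 */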
static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
{
	u8 cur_value;
	int err;

	err = pci_read_config_byte(dev, offset, &cur_value);
	if (err)
		goto out;

	if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
	    || value == PCI_BIST_START)
		err = pci_write_config_byte(dev, offset, value);

out:
	return err;
}

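/* Fields virtualized for every PCI configuration header type. */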
static const struct config_field header_common[] = {
	{
	 .offset    = PCI_VENDOR_ID,
	 .size      = 2,
	 .u.w.read  = xen_pcibk_read_vendor,
	},
	{
	 .offset    = PCI_DEVICE_ID,
	 .size      = 2,
	 .u.w.read  = xen_pcibk_read_device,
	},
	{
	 .offset    = PCI_COMMAND,
	 .size      = 2,
	 .init      = command_init,
	 .release   = bar_release,
	 .u.w.read  = command_read,
	 .u.w.write = command_write,
	},
	{
	 .offset    = PCI_INTERRUPT_LINE,
	 .size      = 1,
	 .u.b.read  = interrupt_read,
	},
	{
	 .offset    = PCI_INTERRUPT_PIN,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	},
	{
	 /* Any side effects of letting driver domain control cache line? */
	 .offset    = PCI_CACHE_LINE_SIZE,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	 .u.b.write = xen_pcibk_write_config_byte,
	},
	{
	 .offset    = PCI_LATENCY_TIMER,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	},
	{
	 .offset    = PCI_BIST,
	 .size      = 1,
	 .u.b.read  = xen_pcibk_read_config_byte,
	 .u.b.write = bist_write,
	},
	{}
};

#define CFG_FIELD_BAR(reg_offset)			\
	{						\
	.offset     = reg_offset,			\
	.size       = 4,				\
	.init       = bar_init,				\
	.reset      = bar_reset,			\
	.release    = bar_release,			\
	.u.dw.read  = bar_read,				\
	.u.dw.write = bar_write,			\
	}

#define CFG_FIELD_ROM(reg_offset)			\
	{						\
	.offset     = reg_offset,			\
	.size       = 4,				\
	.init       = bar_init,				\
	.reset      = bar_reset,			\
	.release    = bar_release,			\
	.u.dw.read  = bar_read,				\
	.u.dw.write = rom_write,			\
	}

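/* Type 0 (normal device) header: six BARs plus the expansion ROM. */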
static const struct config_field header_0[] = {
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
	CFG_FIELD_ROM(PCI_ROM_ADDRESS),
	{}
};

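/* Type 1 (PCI-to-PCI bridge) header: two BARs and the ROM at the bridge offset. */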
static const struct config_field header_1[] = {
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
	CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
	CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
	{}
};

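/*
 * Register the common header fields for a device, then the fields matching
 * its header type (normal device or bridge).
 */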
int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
{
	int err;

	err = xen_pcibk_config_add_fields(dev, header_common);
	if (err)
		goto out;

	switch (dev->hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
		err = xen_pcibk_config_add_fields(dev, header_0);
		break;

	case PCI_HEADER_TYPE_BRIDGE:
		err = xen_pcibk_config_add_fields(dev, header_1);
		break;

	default:
		err = -EINVAL;
		dev_err(&dev->dev, "Unsupported header type %d!\n",
			dev->hdr_type);
		break;
	}

out:
	return err;
}