/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <asm/opal.h>
#include <asm/msi_bitmap.h>
#include <asm/pci-bridge.h> /* for struct pci_controller */
#include <asm/pnv-pci.h>
#include <asm/io.h>

#include "cxl.h"
#include <misc/cxl.h>


#define CXL_PCI_VSEC_ID	0x1280
#define CXL_VSEC_MIN_SIZE 0x80

#define CXL_READ_VSEC_LENGTH(dev, vsec, dest)			\
	{							\
		pci_read_config_word(dev, vsec + 0x6, dest);	\
		*dest >>= 4;					\
	}
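
/*
 * Worked example of the length decode above (illustrative): the PCIe
 * VSEC header dword at vsec + 0x4 packs VSEC ID [15:0], revision
 * [19:16] and length [31:20]. The 16-bit word at vsec + 0x6 therefore
 * holds the revision in bits 0-3 and the length in bits 4-15, so the
 * >> 4 strips the revision nibble:
 *
 *	word at vsec + 0x6 == 0x0801  ->  length == 0x0801 >> 4 == 0x80
 *
 * which is exactly CXL_VSEC_MIN_SIZE.
 */
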
#define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x8, dest)

#define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x9, dest)
#define CXL_STATUS_SECOND_PORT  0x80
#define CXL_STATUS_MSI_X_FULL   0x40
#define CXL_STATUS_MSI_X_SINGLE 0x20
#define CXL_STATUS_FLASH_RW     0x08
#define CXL_STATUS_FLASH_RO     0x04
#define CXL_STATUS_LOADABLE_AFU 0x02
#define CXL_STATUS_LOADABLE_PSL 0x01
/* If we see these features we won't try to use the card */
#define CXL_UNSUPPORTED_FEATURES \
	(CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)

#define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xa, dest)
#define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0xa, val)
#define CXL_VSEC_PROTOCOL_MASK   0xe0
#define CXL_VSEC_PROTOCOL_1024TB 0x80
#define CXL_VSEC_PROTOCOL_512TB  0x40
#define CXL_VSEC_PROTOCOL_256TB  0x20 /* Power 8 uses this */
#define CXL_VSEC_PROTOCOL_ENABLE 0x01

#define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0xc, dest)
#define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xe, dest)
#define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0xf, dest)
#define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
	pci_read_config_word(dev, vsec + 0x10, dest)

#define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
	pci_read_config_byte(dev, vsec + 0x13, dest)
#define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
	pci_write_config_byte(dev, vsec + 0x13, val)
#define CXL_VSEC_USER_IMAGE_LOADED 0x80 /* RO */
#define CXL_VSEC_PERST_LOADS_IMAGE 0x20 /* RW */
#define CXL_VSEC_PERST_SELECT_USER 0x10 /* RW */

#define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x20, dest)
#define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x24, dest)
#define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x28, dest)
#define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
	pci_read_config_dword(dev, vsec + 0x2c, dest)


/* These work a little differently from the p1/p2 register accesses, to
 * make it easier to pull out individual fields */
#define AFUD_READ(afu, off)		in_be64(afu->afu_desc_mmio + off)
#define AFUD_READ_LE(afu, off)		in_le64(afu->afu_desc_mmio + off)
#define EXTRACT_PPC_BIT(val, bit)	(!!((val) & PPC_BIT(bit)))
#define EXTRACT_PPC_BITS(val, bs, be)	(((val) & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
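
/*
 * Note that these use IBM (big-endian) bit numbering, where bit 0 is
 * the most significant bit: PPC_BIT(b) == 1UL << (63 - b). So, for
 * example:
 *
 *	EXTRACT_PPC_BITS(val, 0, 15)  ->  val >> 48 (top 16 bits)
 *	EXTRACT_PPC_BIT(val, 63)      ->  the conventional LSB
 *
 * which is why AFUD_NUM_INTS_PER_PROC() below comes from the most
 * significant 16 bits of the first descriptor doubleword.
 */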

#define AFUD_READ_INFO(afu)		AFUD_READ(afu, 0x0)
#define   AFUD_NUM_INTS_PER_PROC(val)	EXTRACT_PPC_BITS(val,  0, 15)
#define   AFUD_NUM_PROCS(val)		EXTRACT_PPC_BITS(val, 16, 31)
#define   AFUD_NUM_CRS(val)		EXTRACT_PPC_BITS(val, 32, 47)
#define   AFUD_MULTIMODE(val)		EXTRACT_PPC_BIT(val, 48)
#define   AFUD_PUSH_BLOCK_TRANSFER(val)	EXTRACT_PPC_BIT(val, 55)
#define   AFUD_DEDICATED_PROCESS(val)	EXTRACT_PPC_BIT(val, 59)
#define   AFUD_AFU_DIRECTED(val)	EXTRACT_PPC_BIT(val, 61)
#define   AFUD_TIME_SLICED(val)		EXTRACT_PPC_BIT(val, 63)
#define AFUD_READ_CR(afu)		AFUD_READ(afu, 0x20)
#define   AFUD_CR_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_CR_OFF(afu)		AFUD_READ(afu, 0x28)
#define AFUD_READ_PPPSA(afu)		AFUD_READ(afu, 0x30)
#define   AFUD_PPPSA_PP(val)		EXTRACT_PPC_BIT(val, 6)
#define   AFUD_PPPSA_PSA(val)		EXTRACT_PPC_BIT(val, 7)
#define   AFUD_PPPSA_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_PPPSA_OFF(afu)	AFUD_READ(afu, 0x38)
#define AFUD_READ_EB(afu)		AFUD_READ(afu, 0x40)
#define   AFUD_EB_LEN(val)		EXTRACT_PPC_BITS(val, 8, 63)
#define AFUD_READ_EB_OFF(afu)		AFUD_READ(afu, 0x48)

u16 cxl_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x2) * 8)) & 0xffff;
}

u8 cxl_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;

	val = cxl_afu_cr_read32(afu, cr, aligned_off);
	return (val >> ((off & 0x3) * 8)) & 0xff;
}
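
/*
 * Worked example (illustrative): both helpers reduce a sub-word read
 * to a single aligned 32-bit read. For cxl_afu_cr_read16(afu, cr, 0x6):
 *
 *	aligned_off = 0x6 & ~0x3      = 0x4
 *	shift       = (0x6 & 0x2) * 8 = 16
 *	result      = (cxl_afu_cr_read32(afu, cr, 0x4) >> 16) & 0xffff
 *
 * i.e. the upper half of the little-endian dword at offset 4 of the
 * configuration record.
 */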

static const struct pci_device_id cxl_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
	{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
	{ PCI_DEVICE_CLASS(0x120000, ~0), },

	{ }
};
MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);


/*
 * Mostly using these wrappers to avoid confusion:
 * priv 1 is BAR2, while priv 2 is BAR0
 */
static inline resource_size_t p1_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 2);
}

static inline resource_size_t p1_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 2);
}

static inline resource_size_t p2_base(struct pci_dev *dev)
{
	return pci_resource_start(dev, 0);
}

static inline resource_size_t p2_size(struct pci_dev *dev)
{
	return pci_resource_len(dev, 0);
}

static int find_cxl_vsec(struct pci_dev *dev)
{
	int vsec = 0;
	u16 val;

	while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_word(dev, vsec + 0x4, &val);
		if (val == CXL_PCI_VSEC_ID)
			return vsec;
	}
	return 0;
}
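
/*
 * Minimal usage sketch (illustrative): pci_find_next_ext_capability()
 * walks every vendor-specific extended capability, and the word at
 * offset 0x4 of each one is its VSEC ID. A return value of 0 doubles
 * as "not found", since no capability can live at config offset 0:
 *
 *	int vsec = find_cxl_vsec(dev);
 *	if (!vsec)
 *		return -ENODEV;	 /* not a CXL-capable function */
 */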

static void dump_cxl_config_space(struct pci_dev *dev)
{
	int vsec;
	u32 val;

	dev_info(&dev->dev, "dump_cxl_config_space\n");

	pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
	dev_info(&dev->dev, "BAR0: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
	dev_info(&dev->dev, "BAR1: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
	dev_info(&dev->dev, "BAR2: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
	dev_info(&dev->dev, "BAR3: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
	dev_info(&dev->dev, "BAR4: %#.8x\n", val);
	pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
	dev_info(&dev->dev, "BAR5: %#.8x\n", val);

	dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
		p1_base(dev), p1_size(dev));
	dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
		p2_base(dev), p2_size(dev));
	dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
		pci_resource_start(dev, 4), pci_resource_len(dev, 4));

	if (!(vsec = find_cxl_vsec(dev)))
		return;

#define show_reg(name, what) \
	dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)

	pci_read_config_dword(dev, vsec + 0x0, &val);
	show_reg("Cap ID", (val >> 0) & 0xffff);
	show_reg("Cap Ver", (val >> 16) & 0xf);
	show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x4, &val);
	show_reg("VSEC ID", (val >> 0) & 0xffff);
	show_reg("VSEC Rev", (val >> 16) & 0xf);
	show_reg("VSEC Length", (val >> 20) & 0xfff);
	pci_read_config_dword(dev, vsec + 0x8, &val);
	show_reg("Num AFUs", (val >> 0) & 0xff);
	show_reg("Status", (val >> 8) & 0xff);
	show_reg("Mode Control", (val >> 16) & 0xff);
	show_reg("Reserved", (val >> 24) & 0xff);
	pci_read_config_dword(dev, vsec + 0xc, &val);
	show_reg("PSL Rev", (val >> 0) & 0xffff);
	show_reg("CAIA Ver", (val >> 16) & 0xffff);
	pci_read_config_dword(dev, vsec + 0x10, &val);
	show_reg("Base Image Rev", (val >> 0) & 0xffff);
	show_reg("Reserved", (val >> 16) & 0x0fff);
	show_reg("Image Control", (val >> 28) & 0x3);
	show_reg("Reserved", (val >> 30) & 0x1);
	show_reg("Image Loaded", (val >> 31) & 0x1);

	pci_read_config_dword(dev, vsec + 0x14, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x18, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x1c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x20, &val);
	show_reg("AFU Descriptor Offset", val);
	pci_read_config_dword(dev, vsec + 0x24, &val);
	show_reg("AFU Descriptor Size", val);
	pci_read_config_dword(dev, vsec + 0x28, &val);
	show_reg("Problem State Offset", val);
	pci_read_config_dword(dev, vsec + 0x2c, &val);
	show_reg("Problem State Size", val);

	pci_read_config_dword(dev, vsec + 0x30, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x34, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x38, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x3c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x40, &val);
	show_reg("PSL Programming Port", val);
	pci_read_config_dword(dev, vsec + 0x44, &val);
	show_reg("PSL Programming Control", val);

	pci_read_config_dword(dev, vsec + 0x48, &val);
	show_reg("Reserved", val);
	pci_read_config_dword(dev, vsec + 0x4c, &val);
	show_reg("Reserved", val);

	pci_read_config_dword(dev, vsec + 0x50, &val);
	show_reg("Flash Address Register", val);
	pci_read_config_dword(dev, vsec + 0x54, &val);
	show_reg("Flash Size Register", val);
	pci_read_config_dword(dev, vsec + 0x58, &val);
	show_reg("Flash Status/Control Register", val);
	pci_read_config_dword(dev, vsec + 0x5c, &val);
	show_reg("Flash Data Port", val);

#undef show_reg
}

static void dump_afu_descriptor(struct cxl_afu *afu)
{
	u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
	int i;

#define show_reg(name, what) \
	dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)

	val = AFUD_READ_INFO(afu);
	show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
	show_reg("num_of_processes", AFUD_NUM_PROCS(val));
	show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
	show_reg("req_prog_mode", val & 0xffffULL);
	afu_cr_num = AFUD_NUM_CRS(val);

	val = AFUD_READ(afu, 0x8);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x10);
	show_reg("Reserved", val);
	val = AFUD_READ(afu, 0x18);
	show_reg("Reserved", val);

	val = AFUD_READ_CR(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_CR_len", AFUD_CR_LEN(val));
	afu_cr_len = AFUD_CR_LEN(val) * 256;

	val = AFUD_READ_CR_OFF(afu);
	afu_cr_off = val;
	show_reg("AFU_CR_offset", val);

	val = AFUD_READ_PPPSA(afu);
	show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
	show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));

	val = AFUD_READ_PPPSA_OFF(afu);
	show_reg("PerProcessPSA_offset", val);

	val = AFUD_READ_EB(afu);
	show_reg("Reserved", (val >> (63-7)) & 0xff);
	show_reg("AFU_EB_len", AFUD_EB_LEN(val));

	val = AFUD_READ_EB_OFF(afu);
	show_reg("AFU_EB_offset", val);

	for (i = 0; i < afu_cr_num; i++) {
		val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
		show_reg("CR Vendor", val & 0xffff);
		show_reg("CR Device", (val >> 16) & 0xffff);
	}
#undef show_reg
}

static int init_implementation_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	struct device_node *np;
	const __be32 *prop;
	u64 psl_dsnctl;
	u64 chipid;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
		np = of_get_next_parent(np);
	if (!np)
		return -ENODEV;
	chipid = be32_to_cpup(prop);
	of_node_put(np);

	/* Tell PSL where to route data to */
	psl_dsnctl = 0x02E8900002000000ULL | (chipid << (63-5));
	cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
	cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
	/* snoop write mask */
	cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
	/* set fir_accum */
	cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, 0x0800000000000000ULL);
	/* for debugging with trace arrays */
	cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);

	return 0;
}

#define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
#define _2048_250MHZ_CYCLES 1
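
/*
 * TBSYNC_CNT() places the 3-bit sync count in IBM bits 4:6 of
 * CXL_PSL_TB_CTLSTAT, i.e. a left shift of 63 - 6 = 57. For example
 * (illustrative), the call below, TBSYNC_CNT(2 * _2048_250MHZ_CYCLES)
 * == TBSYNC_CNT(2), writes 0x0400000000000000ULL, requesting a sync
 * interval of 2 * 2048 cycles of the 250MHz reference, going by the
 * macro names.
 */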

static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
{
	u64 psl_tb;
	int delta;
	unsigned int retry = 0;
	struct device_node *np;

	if (!(np = pnv_pci_get_phb_node(dev)))
		return -ENODEV;

	/* Do not fail when CAPP timebase sync is not supported by OPAL */
	of_node_get(np);
	if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
		of_node_put(np);
		pr_err("PSL: Timebase sync: OPAL support missing\n");
		return 0;
	}
	of_node_put(np);

	/*
	 * Setup PSL Timebase Control and Status register
	 * with the recommended Timebase Sync Count value
	 */
	cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
		     TBSYNC_CNT(2 * _2048_250MHZ_CYCLES));

	/* Enable PSL Timebase */
	cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
	cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);

	/* Wait until CORE TB and PSL TB difference <= 16usecs */
	do {
		msleep(1);
		if (retry++ > 5) {
			pr_err("PSL: Timebase sync: giving up!\n");
			return -EIO;
		}
		psl_tb = cxl_p1_read(adapter, CXL_PSL_Timebase);
		delta = mftb() - psl_tb;
		if (delta < 0)
			delta = -delta;
	} while (cputime_to_usecs(delta) > 16);

	return 0;
}

static int init_implementation_afu_regs(struct cxl_afu *afu)
{
	/* read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
	/* APC read/write masks for this slice */
	cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
	/* for debugging with trace arrays */
	cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
	cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);

	return 0;
}

int cxl_setup_irq(struct cxl *adapter, unsigned int hwirq,
			 unsigned int virq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
}

int cxl_update_image_control(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;
	int vsec;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
		dev_err(&dev->dev, "failed to read image state: %i\n", rc);
		return rc;
	}

	if (adapter->perst_loads_image)
		image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
	else
		image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;

	if (adapter->perst_select_user)
		image_state |= CXL_VSEC_PERST_SELECT_USER;
	else
		image_state &= ~CXL_VSEC_PERST_SELECT_USER;

	if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
		dev_err(&dev->dev, "failed to update image control: %i\n", rc);
		return rc;
	}

	return 0;
}

int cxl_alloc_one_irq(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirqs(dev, 1);
}

void cxl_release_one_irq(struct cxl *adapter, int hwirq)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_release_hwirqs(dev, hwirq, 1);
}

int cxl_alloc_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter, unsigned int num)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
}

void cxl_release_irq_ranges(struct cxl_irq_ranges *irqs, struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);

	pnv_cxl_release_hwirq_ranges(irqs, dev);
}

static int setup_cxl_bars(struct pci_dev *dev)
{
	/* Safety check in case we get backported to < 3.17 without M64 */
	if ((p1_base(dev) < 0x100000000ULL) ||
	    (p2_base(dev) < 0x100000000ULL)) {
		dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
		return -ENODEV;
	}

	/*
	 * BAR 4/5 has a special meaning for CXL and must be programmed with a
	 * special value corresponding to the CXL protocol address range.
	 * For POWER 8 that means bits 48:49 must be set to 10
	 */
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);

	return 0;
}
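
/*
 * For reference (illustrative): BAR4 and BAR5 pair up as one 64-bit
 * BAR, so the two dword writes above (low dword first) program the
 * single value 0x0002000000000000ULL. That sets address bit 49 only,
 * giving the "bits 48:49 = 10" pattern the comment above calls for.
 */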

/* pciex node: ibm,opal-m64-window = <0x3d058 0x0 0x3d058 0x0 0x8 0x0>; */
static int switch_card_to_cxl(struct pci_dev *dev)
{
	int vsec;
	u8 val;
	int rc;

	dev_info(&dev->dev, "switch card to CXL\n");

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
		dev_err(&dev->dev, "failed to read current mode control: %i\n", rc);
		return rc;
	}
	val &= ~CXL_VSEC_PROTOCOL_MASK;
	val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
	if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
		dev_err(&dev->dev, "failed to enable CXL protocol: %i\n", rc);
		return rc;
	}
	/*
	 * The CAIA spec (v0.12 11.6 Bi-modal Device Support) states
	 * we must wait 100ms after this mode switch before touching
	 * PCIe config space.
	 */
	msleep(100);

	return 0;
}

static int cxl_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	u64 p1n_base, p2n_base, afu_desc;
	const u64 p1n_size = 0x100;
	const u64 p2n_size = 0x1000;

	p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
	p2n_base = p2_base(dev) + (afu->slice * p2n_size);
	afu->psn_phys = p2_base(dev) + (adapter->ps_off + (afu->slice * adapter->ps_size));
	afu_desc = p2_base(dev) + adapter->afu_desc_off + (afu->slice * adapter->afu_desc_size);

	if (!(afu->p1n_mmio = ioremap(p1n_base, p1n_size)))
		goto err;
	if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
		goto err1;
	if (afu_desc) {
		if (!(afu->afu_desc_mmio = ioremap(afu_desc, adapter->afu_desc_size)))
			goto err2;
	}

	return 0;
err2:
	iounmap(afu->p2n_mmio);
err1:
	iounmap(afu->p1n_mmio);
err:
	dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
	return -ENOMEM;
}
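
/*
 * Worked example of the per-slice layout (illustrative), for slice 1:
 *
 *	p1n_base = p1_base(dev) + 0x10000 + 1 * 0x100
 *	p2n_base = p2_base(dev) + 1 * 0x1000
 *	afu_desc = p2_base(dev) + afu_desc_off + 1 * afu_desc_size
 *
 * The afu_desc guard is defensive: cxl_vsec_looks_ok() has already
 * rejected adapters whose VSEC advertises no AFU descriptor.
 */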

static void cxl_unmap_slice_regs(struct cxl_afu *afu)
{
	if (afu->p2n_mmio) {
		iounmap(afu->p2n_mmio);
		afu->p2n_mmio = NULL;
	}
	if (afu->p1n_mmio) {
		iounmap(afu->p1n_mmio);
		afu->p1n_mmio = NULL;
	}
	if (afu->afu_desc_mmio) {
		iounmap(afu->afu_desc_mmio);
		afu->afu_desc_mmio = NULL;
	}
}

void cxl_release_afu(struct device *dev)
{
	struct cxl_afu *afu = to_cxl_afu(dev);

	pr_devel("cxl_release_afu\n");

	idr_destroy(&afu->contexts_idr);
	cxl_release_spa(afu);

	kfree(afu);
}

/* Expects AFU struct to have recently been zeroed out */
static int cxl_read_afu_descriptor(struct cxl_afu *afu)
{
	u64 val;

	val = AFUD_READ_INFO(afu);
	afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
	afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
	afu->crs_num = AFUD_NUM_CRS(val);

	if (AFUD_AFU_DIRECTED(val))
		afu->modes_supported |= CXL_MODE_DIRECTED;
	if (AFUD_DEDICATED_PROCESS(val))
		afu->modes_supported |= CXL_MODE_DEDICATED;
	if (AFUD_TIME_SLICED(val))
		afu->modes_supported |= CXL_MODE_TIME_SLICED;

	val = AFUD_READ_PPPSA(afu);
	afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
	afu->psa = AFUD_PPPSA_PSA(val);
	if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
		afu->pp_offset = AFUD_READ_PPPSA_OFF(afu);

	val = AFUD_READ_CR(afu);
	afu->crs_len = AFUD_CR_LEN(val) * 256;
	afu->crs_offset = AFUD_READ_CR_OFF(afu);

	/* eb_len is in multiples of 4K */
	afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
	afu->eb_offset = AFUD_READ_EB_OFF(afu);

	/* eb_off is 4K aligned so lower 12 bits are always zero */
	if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
		dev_warn(&afu->dev,
			 "Invalid AFU error buffer offset %Lx\n",
			 afu->eb_offset);
		dev_info(&afu->dev,
			 "Ignoring AFU error buffer in the descriptor\n");
		/* indicate that no AFU error buffer exists */
		afu->eb_len = 0;
	}

	return 0;
}
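
/*
 * A note on units (following the arithmetic above): AFUD_PPPSA_LEN()
 * counts 4K pages and AFUD_CR_LEN() counts 256-byte blocks. So, for
 * example, a descriptor reporting PerProcessPSA Length == 0x10 yields
 * pp_size == 0x10 * 4096 == 64K of problem state per process.
 */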

static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
{
	int i;

	if (afu->psa && afu->adapter->ps_size <
			(afu->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
		dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
		return -ENODEV;
	}

	if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
		dev_warn(&afu->dev, "AFU uses < PAGE_SIZE per-process PSA!\n");

	for (i = 0; i < afu->crs_num; i++) {
		if (cxl_afu_cr_read32(afu, i, 0) == 0) {
			dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

static int sanitise_afu_regs(struct cxl_afu *afu)
{
	u64 reg;

	/*
	 * Clear out any regs that contain either an IVTE or address or may be
	 * waiting on an acknowledgement to try to be a bit safer as we bring
	 * it online
	 */
	reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
		if (__cxl_afu_reset(afu))
			return -EIO;
		if (cxl_afu_disable(afu))
			return -EIO;
		if (cxl_psl_purge(afu))
			return -EIO;
	}
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
	cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
	cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
	reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
		if (reg & CXL_PSL_DSISR_TRANS)
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		else
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
	}
	reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (reg) {
		if (reg & ~0xffff)
			dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
		cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
	}
	reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	if (reg) {
		dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
		cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
	}

	return 0;
}

#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
/*
 * afu_eb_read:
 * Called from sysfs and reads the afu error info buffer. The h/w only supports
 * 4/8 byte aligned accesses. So in case the requested offset/count aren't 8 byte
 * aligned the function uses a bounce buffer of at most PAGE_SIZE.
 */
ssize_t cxl_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
				loff_t off, size_t count)
{
	loff_t aligned_start, aligned_end;
	size_t aligned_length;
	void *tbuf;
	const void __iomem *ebuf = afu->afu_desc_mmio + afu->eb_offset;

	if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
		return 0;

	/* calculate aligned read window */
	count = min((size_t)(afu->eb_len - off), count);
	aligned_start = round_down(off, 8);
	aligned_end = round_up(off + count, 8);
	aligned_length = aligned_end - aligned_start;

	/* max we can copy in one read is PAGE_SIZE */
	if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
		aligned_length = ERR_BUFF_MAX_COPY_SIZE;
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	}

	/* use bounce buffer for copy */
	tbuf = (void *)__get_free_page(GFP_TEMPORARY);
	if (!tbuf)
		return -ENOMEM;

	/* perform aligned read from the mmio region */
	memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
	memcpy(buf, tbuf + (off & 0x7), count);

	free_page((unsigned long)tbuf);

	return count;
}
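
/*
 * Worked example of the alignment fix-up above (illustrative), for a
 * read of count = 7 at off = 5:
 *
 *	aligned_start  = round_down(5, 8)  = 0
 *	aligned_end    = round_up(12, 8)   = 16
 *	aligned_length = 16
 *
 * memcpy_fromio() then pulls 16 bytes using aligned accesses, and the
 * caller's 7 bytes are copied back out of the bounce buffer starting
 * at tbuf + (5 & 0x7) = tbuf + 5.
 */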

static int cxl_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	if ((rc = cxl_map_slice_regs(afu, adapter, dev)))
		return rc;

	if ((rc = sanitise_afu_regs(afu)))
		goto err1;

	/* We need to reset the AFU before we can read the AFU descriptor */
	if ((rc = __cxl_afu_reset(afu)))
		goto err1;

	if (cxl_verbose)
		dump_afu_descriptor(afu);

	if ((rc = cxl_read_afu_descriptor(afu)))
		goto err1;

	if ((rc = cxl_afu_descriptor_looks_ok(afu)))
		goto err1;

	if ((rc = init_implementation_afu_regs(afu)))
		goto err1;

	if ((rc = cxl_register_serr_irq(afu)))
		goto err1;

	if ((rc = cxl_register_psl_irq(afu)))
		goto err2;

	return 0;

err2:
	cxl_release_serr_irq(afu);
err1:
	cxl_unmap_slice_regs(afu);
	return rc;
}

static void cxl_deconfigure_afu(struct cxl_afu *afu)
{
	cxl_release_psl_irq(afu);
	cxl_release_serr_irq(afu);
	cxl_unmap_slice_regs(afu);
}

static int cxl_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
{
	struct cxl_afu *afu;
	int rc;

	afu = cxl_alloc_afu(adapter, slice);
	if (!afu)
		return -ENOMEM;

	rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
	if (rc)
		goto err_free;

	rc = cxl_configure_afu(afu, adapter, dev);
	if (rc)
		goto err_free;

	/* Don't care if this fails */
	cxl_debugfs_afu_add(afu);

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	adapter->afu[afu->slice] = afu;

	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put1:
	cxl_deconfigure_afu(afu);
	cxl_debugfs_afu_remove(afu);
	device_unregister(&afu->dev);
	return rc;

err_free:
	kfree(afu);
	return rc;
}

static void cxl_remove_afu(struct cxl_afu *afu)
{
	pr_devel("cxl_remove_afu\n");

	if (!afu)
		return;

	cxl_sysfs_afu_remove(afu);
	cxl_debugfs_afu_remove(afu);

	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_afu_deactivate_mode(afu);

	cxl_deconfigure_afu(afu);
	device_unregister(&afu->dev);
}

int cxl_reset(struct cxl *adapter)
{
	struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
	int rc;

	if (adapter->perst_same_image) {
		dev_warn(&dev->dev,
			 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
		return -EINVAL;
	}

	dev_info(&dev->dev, "CXL reset\n");

	/* pcie_warm_reset requests a fundamental pci reset which includes a
	 * PERST assert/deassert.  PERST triggers a loading of the image
	 * if "user" or "factory" is selected in sysfs */
	if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
		dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
		return rc;
	}

	return rc;
}

static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
{
	if (pci_request_region(dev, 2, "priv 1 regs"))
		goto err1;
	if (pci_request_region(dev, 0, "priv 2 regs"))
		goto err2;

	pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
			p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));

	if (!(adapter->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
		goto err3;

	if (!(adapter->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
		goto err4;

	return 0;

err4:
	iounmap(adapter->p1_mmio);
	adapter->p1_mmio = NULL;
err3:
	pci_release_region(dev, 0);
err2:
	pci_release_region(dev, 2);
err1:
	return -ENOMEM;
}

static void cxl_unmap_adapter_regs(struct cxl *adapter)
{
	if (adapter->p1_mmio) {
		iounmap(adapter->p1_mmio);
		adapter->p1_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 2);
	}
	if (adapter->p2_mmio) {
		iounmap(adapter->p2_mmio);
		adapter->p2_mmio = NULL;
		pci_release_region(to_pci_dev(adapter->dev.parent), 0);
	}
}

static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
{
	int vsec;
	u32 afu_desc_off, afu_desc_size;
	u32 ps_off, ps_size;
	u16 vseclen;
	u8 image_state;

	if (!(vsec = find_cxl_vsec(dev))) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
		return -ENODEV;
	}

	CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
	if (vseclen < CXL_VSEC_MIN_SIZE) {
		dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
		return -EINVAL;
	}

	CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
	CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
	CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
	CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
	CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
	CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
	adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
	adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);

	CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
	CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
	CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
	CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
	CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);

	/* Convert everything to bytes, because there is NO WAY I'd look at the
	 * code a month later and forget what units these are in ;-) */
	adapter->ps_off = ps_off * 64 * 1024;
	adapter->ps_size = ps_size * 64 * 1024;
	adapter->afu_desc_off = afu_desc_off * 64 * 1024;
	adapter->afu_desc_size = afu_desc_size * 64 * 1024;

	/* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
	adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;

	return 0;
}
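
/*
 * For example (illustrative): a VSEC reporting ps_off == 0x10 gives
 * adapter->ps_off = 0x10 * 64 * 1024 = 1M. The user IRQ budget holds
 * back one interrupt for the PSL error IRQ plus two per slice (one
 * slice error, one DSI), per the comment above.
 */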

/*
 * Workaround a PCIe Host Bridge defect on some cards, that can cause
 * malformed Transaction Layer Packet (TLP) errors to be erroneously
 * reported. Mask this error in the Uncorrectable Error Mask Register.
 *
 * The upper nibble of the PSL revision is used to distinguish between
 * different cards. The affected ones have it set to 0.
 */
static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
{
	int aer;
	u32 data;

	if (adapter->psl_rev & 0xf000)
		return;
	if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
		return;
	pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
	if (data & PCI_ERR_UNC_MALF_TLP)
		if (data & PCI_ERR_UNC_INTN)
			return;
	data |= PCI_ERR_UNC_MALF_TLP;
	data |= PCI_ERR_UNC_INTN;
	pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
}

static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
{
	if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
		return -EBUSY;

	if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
		dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
		return -EINVAL;
	}

	if (!adapter->slices) {
		/* Once we support dynamic reprogramming we can use the card if
		 * it supports loadable AFUs */
		dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
		return -EINVAL;
	}

	if (!adapter->afu_desc_off || !adapter->afu_desc_size) {
		dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
		return -EINVAL;
	}

	if (adapter->ps_size > p2_size(dev) - adapter->ps_off) {
		dev_err(&dev->dev, "ABORTING: Problem state size larger than "
				   "available in BAR2: 0x%llx > 0x%llx\n",
			 adapter->ps_size, p2_size(dev) - adapter->ps_off);
		return -EINVAL;
	}

	return 0;
}

static void cxl_release_adapter(struct device *dev)
{
	struct cxl *adapter = to_cxl_adapter(dev);

	pr_devel("cxl_release_adapter\n");

	cxl_remove_adapter_nr(adapter);

	kfree(adapter);
}

#define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))

static int sanitise_adapter_regs(struct cxl *adapter)
{
	/* Clear PSL tberror bit by writing 1 to it */
	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
	return cxl_tlb_slb_invalidate(adapter);
}

/* This should contain *only* operations that can safely be done in
 * both creation and recovery.
 */
static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
{
	int rc;

	adapter->dev.parent = &dev->dev;
	adapter->dev.release = cxl_release_adapter;
	pci_set_drvdata(dev, adapter);

	rc = pci_enable_device(dev);
	if (rc) {
		dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
		return rc;
	}

	if ((rc = cxl_read_vsec(adapter, dev)))
		return rc;

	if ((rc = cxl_vsec_looks_ok(adapter, dev)))
		return rc;

	cxl_fixup_malformed_tlp(adapter, dev);

	if ((rc = setup_cxl_bars(dev)))
		return rc;

	if ((rc = switch_card_to_cxl(dev)))
		return rc;

	if ((rc = cxl_update_image_control(adapter)))
		return rc;

	if ((rc = cxl_map_adapter_regs(adapter, dev)))
		return rc;

	if ((rc = sanitise_adapter_regs(adapter)))
		goto err;

	if ((rc = init_implementation_adapter_regs(adapter, dev)))
		goto err;

	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_CAPI)))
		goto err;

	/* If recovery happened, the last step is to turn on snooping.
	 * In the non-recovery case this has no effect */
	if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
		goto err;

	if ((rc = cxl_setup_psl_timebase(adapter, dev)))
		goto err;

	if ((rc = cxl_register_psl_err_irq(adapter)))
		goto err;

	return 0;

err:
	cxl_unmap_adapter_regs(adapter);
	return rc;
}

static void cxl_deconfigure_adapter(struct cxl *adapter)
{
	struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);

	cxl_release_psl_err_irq(adapter);
	cxl_unmap_adapter_regs(adapter);

	pci_disable_device(pdev);
}

static struct cxl *cxl_init_adapter(struct pci_dev *dev)
{
	struct cxl *adapter;
	int rc;

	adapter = cxl_alloc_adapter();
	if (!adapter)
		return ERR_PTR(-ENOMEM);

	/* Set defaults for parameters which need to persist over
	 * configure/reconfigure
	 */
	adapter->perst_loads_image = true;
	adapter->perst_same_image = false;

	rc = cxl_configure_adapter(adapter, dev);
	if (rc) {
		pci_disable_device(dev);
		cxl_release_adapter(&adapter->dev);
		return ERR_PTR(rc);
	}

	/* Don't care if this one fails: */
	cxl_debugfs_adapter_add(adapter);

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	return adapter;

err_put1:
	/* This should mirror cxl_remove_adapter, except without the
	 * sysfs parts
	 */
	cxl_debugfs_adapter_remove(adapter);
	cxl_deconfigure_adapter(adapter);
	device_unregister(&adapter->dev);
	return ERR_PTR(rc);
}

static void cxl_remove_adapter(struct cxl *adapter)
{
	pr_devel("cxl_remove_adapter\n");

	cxl_sysfs_adapter_remove(adapter);
	cxl_debugfs_adapter_remove(adapter);

	cxl_deconfigure_adapter(adapter);

	device_unregister(&adapter->dev);
}

static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct cxl *adapter;
	int slice;
	int rc;

	if (cxl_verbose)
		dump_cxl_config_space(dev);

	adapter = cxl_init_adapter(dev);
	if (IS_ERR(adapter)) {
		dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
		return PTR_ERR(adapter);
	}

	for (slice = 0; slice < adapter->slices; slice++) {
		if ((rc = cxl_init_afu(adapter, slice, dev))) {
			dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
			continue;
		}

		rc = cxl_afu_select_best_mode(adapter->afu[slice]);
		if (rc)
			dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
	}

	return 0;
}

static void cxl_remove(struct pci_dev *dev)
{
	struct cxl *adapter = pci_get_drvdata(dev);
	struct cxl_afu *afu;
	int i;

	/*
	 * Lock to prevent someone grabbing a ref through the adapter list as
	 * we are removing it
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];
		cxl_pci_vphb_remove(afu);
		cxl_remove_afu(afu);
	}
	cxl_remove_adapter(adapter);
}

static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
						pci_channel_state_t state)
{
	struct pci_dev *afu_dev;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;

	/* There should only be one entry, but go through the list
	 * anyway
	 */
	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
		if (!afu_dev->driver)
			continue;

		afu_dev->error_state = state;

		if (afu_dev->driver->err_handler)
			afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
										  state);
		/* Disconnect trumps all, NONE trumps NEED_RESET */
		if (afu_result == PCI_ERS_RESULT_DISCONNECT)
			result = PCI_ERS_RESULT_DISCONNECT;
		else if ((afu_result == PCI_ERS_RESULT_NONE) &&
			 (result == PCI_ERS_RESULT_NEED_RESET))
			result = PCI_ERS_RESULT_NONE;
	}
	return result;
}

static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
	int i;

	/* At this point, we could still have an interrupt pending.
	 * Let's try to get them out of the way before they do
	 * anything we don't like.
	 */
	schedule();

	/* If we're permanently dead, give up. */
	if (state == pci_channel_io_perm_failure) {
		/* Tell the AFU drivers; but we don't care what they
		 * say, we're going away.
		 */
		for (i = 0; i < adapter->slices; i++) {
			afu = adapter->afu[i];
			cxl_vphb_error_detected(afu, state);
		}
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Are we reflashing?
	 *
	 * If we reflash, we could come back as something entirely
	 * different, including a non-CAPI card. As such, by default
	 * we don't participate in the process. We'll be unbound and
	 * the slot re-probed. (TODO: check EEH doesn't blindly rebind
	 * us!)
	 *
	 * However, this isn't the entire story: for reliability
	 * reasons, we usually want to reflash the FPGA on PERST in
	 * order to get back to a more reliable known-good state.
	 *
	 * This causes us a bit of a problem: if we reflash we can't
	 * trust that we'll come back the same - we could have a new
	 * image and been PERSTed in order to load that
	 * image. However, most of the time we actually *will* come
	 * back the same - for example a regular EEH event.
	 *
	 * Therefore, we allow the user to assert that the image is
	 * indeed the same and that we should continue on into EEH
	 * anyway.
	 */
	if (adapter->perst_loads_image && !adapter->perst_same_image) {
		/* TODO take the PHB out of CXL mode */
		dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
		return PCI_ERS_RESULT_NONE;
	}

	/*
	 * At this point, we want to try to recover.  We'll always
	 * need a complete slot reset: we don't trust any other reset.
	 *
	 * Now, we go through each AFU:
	 *  - We send the driver, if bound, an error_detected callback.
	 *    We expect it to clean up, but it can also tell us to give
	 *    up and permanently detach the card. To simplify things, if
	 *    any bound AFU driver doesn't support EEH, we give up on EEH.
	 *
	 *  - We detach all contexts associated with the AFU. This
	 *    does not free them, but puts them into a CLOSED state
	 *    which causes the associated files to return useful
	 *    errors to userland. It also unmaps, but does not free,
	 *    any IRQs.
	 *
	 *  - We clean up our side: releasing and unmapping resources we hold
	 *    so we can wire them up again when the hardware comes back up.
	 *
	 * Driver authors should note:
	 *
	 *  - Any contexts you create in your kernel driver (except
	 *    those associated with anonymous file descriptors) are
	 *    your responsibility to free and recreate. Likewise with
	 *    any attached resources.
	 *
	 *  - We will take responsibility for re-initialising the
	 *    device context (the one set up for you in
	 *    cxl_pci_enable_device_hook and accessed through
	 *    cxl_get_context). If you've attached IRQs or other
	 *    resources to it, they remain yours to free.
	 *
	 * You can call the same functions to release resources as you
	 * normally would: we make sure that these functions continue
	 * to work when the hardware is down.
	 *
	 * Two examples:
	 *
	 * 1) If you normally free all your resources at the end of
	 *    each request, or if you use anonymous FDs, your
	 *    error_detected callback can simply set a flag to tell
	 *    your driver not to start any new calls. You can then
	 *    clear the flag in the resume callback.
	 *
	 * 2) If you normally allocate your resources on startup:
	 *     * Set a flag in error_detected as above.
	 *     * Let CXL detach your contexts.
	 *     * In slot_reset, free the old resources and allocate new ones.
	 *     * In resume, clear the flag to allow things to start.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		result = cxl_vphb_error_detected(afu, state);

		/* Only continue if everyone agrees on NEED_RESET */
		if (result != PCI_ERS_RESULT_NEED_RESET)
			return result;

		cxl_context_detach_all(afu);
		cxl_afu_deactivate_mode(afu);
		cxl_deconfigure_afu(afu);
	}
	cxl_deconfigure_adapter(adapter);

	return result;
}

static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct cxl_context *ctx;
	struct pci_dev *afu_dev;
	pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
	pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
	int i;

	if (cxl_configure_adapter(adapter, pdev))
		goto err;

	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		if (cxl_configure_afu(afu, adapter, pdev))
			goto err;

		if (cxl_afu_select_best_mode(afu))
			goto err;

		cxl_pci_vphb_reconfigure(afu);

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			/* Reset the device context.
			 * TODO: make this less disruptive
			 */
			ctx = cxl_get_context(afu_dev);

			if (ctx && cxl_release_context(ctx))
				goto err;

			ctx = cxl_dev_context_init(afu_dev);
			if (!ctx)
				goto err;

			afu_dev->dev.archdata.cxl_ctx = ctx;

			if (cxl_afu_check_and_enable(afu))
				goto err;

			afu_dev->error_state = pci_channel_io_normal;

			/* If there's a driver attached, allow it to
			 * chime in on recovery. Drivers should check
			 * if everything has come back OK, but
			 * shouldn't start new work until we call
			 * their resume function.
			 */
			if (!afu_dev->driver)
				continue;

			if (afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->slot_reset)
				afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);

			if (afu_result == PCI_ERS_RESULT_DISCONNECT)
				result = PCI_ERS_RESULT_DISCONNECT;
		}
	}
	return result;

err:
	/* All the bits that happen in both error_detected and cxl_remove
	 * should be idempotent, so we don't need to worry about leaving a mix
	 * of unconfigured and reconfigured resources.
	 */
	dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
	return PCI_ERS_RESULT_DISCONNECT;
}

static void cxl_pci_resume(struct pci_dev *pdev)
{
	struct cxl *adapter = pci_get_drvdata(pdev);
	struct cxl_afu *afu;
	struct pci_dev *afu_dev;
	int i;

	/* Everything is back now. Drivers should restart work now.
	 * This is not the place to be checking if everything came back up
	 * properly, because there's no return value: do that in slot_reset.
	 */
	for (i = 0; i < adapter->slices; i++) {
		afu = adapter->afu[i];

		list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
			if (afu_dev->driver && afu_dev->driver->err_handler &&
			    afu_dev->driver->err_handler->resume)
				afu_dev->driver->err_handler->resume(afu_dev);
		}
	}
}

static const struct pci_error_handlers cxl_err_handler = {
	.error_detected = cxl_pci_error_detected,
	.slot_reset = cxl_pci_slot_reset,
	.resume = cxl_pci_resume,
};

struct pci_driver cxl_pci_driver = {
	.name = "cxl-pci",
	.id_table = cxl_pci_tbl,
	.probe = cxl_probe,
	.remove = cxl_remove,
	.shutdown = cxl_remove,
	.err_handler = &cxl_err_handler,
};