xref: /openbmc/linux/drivers/misc/cxl/guest.c (revision 48ca54e3)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright 2015 IBM Corp.
4  */
5 
6 #include <linux/spinlock.h>
7 #include <linux/uaccess.h>
8 #include <linux/delay.h>
9 #include <linux/irqdomain.h>
10 #include <linux/platform_device.h>
11 
12 #include "cxl.h"
13 #include "hcalls.h"
14 #include "trace.h"
15 
16 #define CXL_ERROR_DETECTED_EVENT	1
17 #define CXL_SLOT_RESET_EVENT		2
18 #define CXL_RESUME_EVENT		3
19 
20 static void pci_error_handlers(struct cxl_afu *afu,
21 				int bus_error_event,
22 				pci_channel_state_t state)
23 {
24 	struct pci_dev *afu_dev;
25 	struct pci_driver *afu_drv;
26 	const struct pci_error_handlers *err_handler;
27 
28 	if (afu->phb == NULL)
29 		return;
30 
31 	list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
32 		afu_drv = to_pci_driver(afu_dev->dev.driver);
33 		if (!afu_drv)
34 			continue;
35 
36 		err_handler = afu_drv->err_handler;
37 		switch (bus_error_event) {
38 		case CXL_ERROR_DETECTED_EVENT:
39 			afu_dev->error_state = state;
40 
41 			if (err_handler &&
42 			    err_handler->error_detected)
43 				err_handler->error_detected(afu_dev, state);
44 			break;
45 		case CXL_SLOT_RESET_EVENT:
46 			afu_dev->error_state = state;
47 
48 			if (err_handler &&
49 			    err_handler->slot_reset)
50 				err_handler->slot_reset(afu_dev);
51 			break;
52 		case CXL_RESUME_EVENT:
53 			if (err_handler &&
54 			    err_handler->resume)
55 				err_handler->resume(afu_dev);
56 			break;
57 		}
58 	}
59 }
60 
/*
 * Report a PSL slice error for a context and ack the interrupt.
 * 'dsisr' is accepted for backend-ops signature compatibility but is
 * not used here; only the error status register value is logged.
 */
static irqreturn_t guest_handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr,
					u64 errstat)
{
	pr_devel("in %s\n", __func__);
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat);

	/* ack with no TFC bits; pass the error status through */
	return cxl_ops->ack_irq(ctx, 0, errstat);
}
69 
/*
 * Collect VPD (Vital Product Data) for the adapter or for one AFU.
 * Exactly one of 'adapter'/'afu' is expected to be non-NULL; the
 * exported wrappers pass NULL for the other.
 *
 * The hcall takes a scatter/gather list of guest pages, so the
 * request is split across up to SG_MAX_ENTRIES buffers of
 * SG_BUFFER_SIZE bytes each; larger requests are silently capped.
 *
 * Returns the number of bytes copied into 'buf' on success, or a
 * negative errno.
 */
static ssize_t guest_collect_vpd(struct cxl *adapter, struct cxl_afu *afu,
			void *buf, size_t len)
{
	unsigned int entries, mod;
	unsigned long **vpd_buf = NULL;
	struct sg_list *le;
	int rc = 0, i, tocopy;
	u64 out = 0;

	if (buf == NULL)
		return -EINVAL;

	/* number of entries in the list */
	entries = len / SG_BUFFER_SIZE;
	mod = len % SG_BUFFER_SIZE;
	if (mod)
		entries++;

	/* cap the request at what one sg list can describe */
	if (entries > SG_MAX_ENTRIES) {
		entries = SG_MAX_ENTRIES;
		len = SG_MAX_ENTRIES * SG_BUFFER_SIZE;
		mod = 0;
	}

	vpd_buf = kcalloc(entries, sizeof(unsigned long *), GFP_KERNEL);
	if (!vpd_buf)
		return -ENOMEM;

	/* the sg list itself lives in its own zeroed page */
	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err1;
	}

	/* one page per entry; record its physical address and length */
	for (i = 0; i < entries; i++) {
		vpd_buf[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!vpd_buf[i]) {
			rc = -ENOMEM;
			goto err2;
		}
		le[i].phys_addr = cpu_to_be64(virt_to_phys(vpd_buf[i]));
		le[i].len = cpu_to_be64(SG_BUFFER_SIZE);
		/* the last entry may cover less than a full buffer */
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
	}

	if (adapter)
		rc = cxl_h_collect_vpd_adapter(adapter->guest->handle,
					virt_to_phys(le), entries, &out);
	else
		rc = cxl_h_collect_vpd(afu->guest->handle, 0,
				virt_to_phys(le), entries, &out);
	pr_devel("length of available (entries: %i), vpd: %#llx\n",
		entries, out);

	if (!rc) {
		/*
		 * hcall returns in 'out' the size of available VPDs.
		 * It fills the buffer with as much data as possible.
		 */
		if (out < len)
			len = out;
		rc = len;
		if (out) {
			/* gather the sg pages back into the caller's buffer */
			for (i = 0; i < entries; i++) {
				if (len < SG_BUFFER_SIZE)
					tocopy = len;
				else
					tocopy = SG_BUFFER_SIZE;
				memcpy(buf, vpd_buf[i], tocopy);
				buf += tocopy;
				len -= tocopy;
			}
		}
	}
err2:
	/* unallocated slots are NULL (kcalloc), so this is safe mid-loop */
	for (i = 0; i < entries; i++) {
		if (vpd_buf[i])
			free_page((unsigned long) vpd_buf[i]);
	}
	free_page((unsigned long) le);
err1:
	kfree(vpd_buf);
	return rc;
}
155 
/* Thin wrapper: ask the hypervisor for the interrupt info of a context */
static int guest_get_irq_info(struct cxl_context *ctx, struct cxl_irq_info *info)
{
	return cxl_h_collect_int_info(ctx->afu->guest->handle, ctx->process_token, info);
}
160 
161 static irqreturn_t guest_psl_irq(int irq, void *data)
162 {
163 	struct cxl_context *ctx = data;
164 	struct cxl_irq_info irq_info;
165 	int rc;
166 
167 	pr_devel("%d: received PSL interrupt %i\n", ctx->pe, irq);
168 	rc = guest_get_irq_info(ctx, &irq_info);
169 	if (rc) {
170 		WARN(1, "Unable to get IRQ info: %i\n", rc);
171 		return IRQ_HANDLED;
172 	}
173 
174 	rc = cxl_irq_psl8(irq, ctx, &irq_info);
175 	return rc;
176 }
177 
178 static int afu_read_error_state(struct cxl_afu *afu, int *state_out)
179 {
180 	u64 state;
181 	int rc = 0;
182 
183 	if (!afu)
184 		return -EIO;
185 
186 	rc = cxl_h_read_error_state(afu->guest->handle, &state);
187 	if (!rc) {
188 		WARN_ON(state != H_STATE_NORMAL &&
189 			state != H_STATE_DISABLE &&
190 			state != H_STATE_TEMP_UNAVAILABLE &&
191 			state != H_STATE_PERM_UNAVAILABLE);
192 		*state_out = state & 0xffffffff;
193 	}
194 	return rc;
195 }
196 
/*
 * Slice error interrupt handler: dump the PSL/AFU error registers and
 * ack the function error interrupt via hcall.
 */
static irqreturn_t guest_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	int rc;
	u64 serr, afu_error, dsisr;

	/* SERR must come from the hypervisor; p1 regs aren't guest-visible */
	rc = cxl_h_get_fn_error_interrupt(afu->guest->handle, &serr);
	if (rc) {
		dev_crit(&afu->dev, "Couldn't read PSL_SERR_An: %d\n", rc);
		return IRQ_HANDLED;
	}
	/* p2 registers are directly mapped, read them for diagnostics */
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	/* ack with the SERR value we just read */
	rc = cxl_h_ack_fn_error_interrupt(afu->guest->handle, serr);
	if (rc)
		dev_crit(&afu->dev, "Couldn't ack slice error interrupt: %d\n",
			rc);

	return IRQ_HANDLED;
}
221 
222 
223 static int irq_alloc_range(struct cxl *adapter, int len, int *irq)
224 {
225 	int i, n;
226 	struct irq_avail *cur;
227 
228 	for (i = 0; i < adapter->guest->irq_nranges; i++) {
229 		cur = &adapter->guest->irq_avail[i];
230 		n = bitmap_find_next_zero_area(cur->bitmap, cur->range,
231 					0, len, 0);
232 		if (n < cur->range) {
233 			bitmap_set(cur->bitmap, n, len);
234 			*irq = cur->offset + n;
235 			pr_devel("guest: allocate IRQs %#x->%#x\n",
236 				*irq, *irq + len - 1);
237 
238 			return 0;
239 		}
240 	}
241 	return -ENOSPC;
242 }
243 
244 static int irq_free_range(struct cxl *adapter, int irq, int len)
245 {
246 	int i, n;
247 	struct irq_avail *cur;
248 
249 	if (len == 0)
250 		return -ENOENT;
251 
252 	for (i = 0; i < adapter->guest->irq_nranges; i++) {
253 		cur = &adapter->guest->irq_avail[i];
254 		if (irq >= cur->offset &&
255 			(irq + len) <= (cur->offset + cur->range)) {
256 			n = irq - cur->offset;
257 			bitmap_clear(cur->bitmap, n, len);
258 			pr_devel("guest: release IRQs %#x->%#x\n",
259 				irq, irq + len - 1);
260 			return 0;
261 		}
262 	}
263 	return -ENOENT;
264 }
265 
/*
 * Reset the whole adapter: notify AFU drivers of the error, detach all
 * contexts, issue the reset hcall, then (on success) run the slot-reset
 * and resume callbacks.
 * afu_list_lock is held across the driver callbacks and the hcall to
 * keep the afu[] array stable.
 */
static int guest_reset(struct cxl *adapter)
{
	struct cxl_afu *afu = NULL;
	int i, rc;

	pr_devel("Adapter reset request\n");
	spin_lock(&adapter->afu_list_lock);
	for (i = 0; i < adapter->slices; i++) {
		if ((afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
					pci_channel_io_frozen);
			cxl_context_detach_all(afu);
		}
	}

	rc = cxl_h_reset_adapter(adapter->guest->handle);
	/* only report recovery if the reset hcall succeeded */
	for (i = 0; i < adapter->slices; i++) {
		if (!rc && (afu = adapter->afu[i])) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
	}
	spin_unlock(&adapter->afu_list_lock);
	return rc;
}
292 
293 static int guest_alloc_one_irq(struct cxl *adapter)
294 {
295 	int irq;
296 
297 	spin_lock(&adapter->guest->irq_alloc_lock);
298 	if (irq_alloc_range(adapter, 1, &irq))
299 		irq = -ENOSPC;
300 	spin_unlock(&adapter->guest->irq_alloc_lock);
301 	return irq;
302 }
303 
/* Release a single hwirq previously obtained from guest_alloc_one_irq() */
static void guest_release_one_irq(struct cxl *adapter, int irq)
{
	spin_lock(&adapter->guest->irq_alloc_lock);
	irq_free_range(adapter, irq, 1);
	spin_unlock(&adapter->guest->irq_alloc_lock);
}
310 
/*
 * Allocate 'num' hwirqs for a context, spread over at most
 * CXL_IRQ_RANGES contiguous runs. If no free run of the remaining
 * size exists, the request is halved until one fits.
 *
 * On failure, all ranges recorded so far are released; slots never
 * filled were zeroed by the memset, and irq_free_range() rejects
 * their zero length harmlessly.
 */
static int guest_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
				struct cxl *adapter, unsigned int num)
{
	int i, try, irq;

	memset(irqs, 0, sizeof(struct cxl_irq_ranges));

	spin_lock(&adapter->guest->irq_alloc_lock);
	for (i = 0; i < CXL_IRQ_RANGES && num; i++) {
		try = num;
		/* binary back-off until a contiguous run is found */
		while (try) {
			if (irq_alloc_range(adapter, try, &irq) == 0)
				break;
			try /= 2;
		}
		if (!try)
			goto error;
		irqs->offset[i] = irq;
		irqs->range[i] = try;
		num -= try;
	}
	/* ran out of range slots before satisfying the request */
	if (num)
		goto error;
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return 0;

error:
	for (i = 0; i < CXL_IRQ_RANGES; i++)
		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
	spin_unlock(&adapter->guest->irq_alloc_lock);
	return -ENOSPC;
}
343 
344 static void guest_release_irq_ranges(struct cxl_irq_ranges *irqs,
345 				struct cxl *adapter)
346 {
347 	int i;
348 
349 	spin_lock(&adapter->guest->irq_alloc_lock);
350 	for (i = 0; i < CXL_IRQ_RANGES; i++)
351 		irq_free_range(adapter, irqs->offset[i], irqs->range[i]);
352 	spin_unlock(&adapter->guest->irq_alloc_lock);
353 }
354 
355 static int guest_register_serr_irq(struct cxl_afu *afu)
356 {
357 	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
358 				      dev_name(&afu->dev));
359 	if (!afu->err_irq_name)
360 		return -ENOMEM;
361 
362 	if (!(afu->serr_virq = cxl_map_irq(afu->adapter, afu->serr_hwirq,
363 				 guest_slice_irq_err, afu, afu->err_irq_name))) {
364 		kfree(afu->err_irq_name);
365 		afu->err_irq_name = NULL;
366 		return -ENOMEM;
367 	}
368 
369 	return 0;
370 }
371 
/* Undo guest_register_serr_irq(): unmap, return the hwirq, free the name */
static void guest_release_serr_irq(struct cxl_afu *afu)
{
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
}
378 
/*
 * Ack a translation fault for a context: pass the upper word of the
 * TFC register and a flag indicating whether a PSL reset is wanted.
 */
static int guest_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	return cxl_h_control_faults(ctx->afu->guest->handle, ctx->process_token,
				tfc >> 32, (psl_reset_mask != 0));
}
384 
385 static void disable_afu_irqs(struct cxl_context *ctx)
386 {
387 	irq_hw_number_t hwirq;
388 	unsigned int virq;
389 	int r, i;
390 
391 	pr_devel("Disabling AFU(%d) interrupts\n", ctx->afu->slice);
392 	for (r = 0; r < CXL_IRQ_RANGES; r++) {
393 		hwirq = ctx->irqs.offset[r];
394 		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
395 			virq = irq_find_mapping(NULL, hwirq);
396 			disable_irq(virq);
397 		}
398 	}
399 }
400 
401 static void enable_afu_irqs(struct cxl_context *ctx)
402 {
403 	irq_hw_number_t hwirq;
404 	unsigned int virq;
405 	int r, i;
406 
407 	pr_devel("Enabling AFU(%d) interrupts\n", ctx->afu->slice);
408 	for (r = 0; r < CXL_IRQ_RANGES; r++) {
409 		hwirq = ctx->irqs.offset[r];
410 		for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
411 			virq = irq_find_mapping(NULL, hwirq);
412 			enable_irq(virq);
413 		}
414 	}
415 }
416 
417 static int _guest_afu_cr_readXX(int sz, struct cxl_afu *afu, int cr_idx,
418 			u64 offset, u64 *val)
419 {
420 	unsigned long cr;
421 	char c;
422 	int rc = 0;
423 
424 	if (afu->crs_len < sz)
425 		return -ENOENT;
426 
427 	if (unlikely(offset >= afu->crs_len))
428 		return -ERANGE;
429 
430 	cr = get_zeroed_page(GFP_KERNEL);
431 	if (!cr)
432 		return -ENOMEM;
433 
434 	rc = cxl_h_get_config(afu->guest->handle, cr_idx, offset,
435 			virt_to_phys((void *)cr), sz);
436 	if (rc)
437 		goto err;
438 
439 	switch (sz) {
440 	case 1:
441 		c = *((char *) cr);
442 		*val = c;
443 		break;
444 	case 2:
445 		*val = in_le16((u16 *)cr);
446 		break;
447 	case 4:
448 		*val = in_le32((unsigned *)cr);
449 		break;
450 	case 8:
451 		*val = in_le64((u64 *)cr);
452 		break;
453 	default:
454 		WARN_ON(1);
455 	}
456 err:
457 	free_page(cr);
458 	return rc;
459 }
460 
461 static int guest_afu_cr_read32(struct cxl_afu *afu, int cr_idx, u64 offset,
462 			u32 *out)
463 {
464 	int rc;
465 	u64 val;
466 
467 	rc = _guest_afu_cr_readXX(4, afu, cr_idx, offset, &val);
468 	if (!rc)
469 		*out = (u32) val;
470 	return rc;
471 }
472 
473 static int guest_afu_cr_read16(struct cxl_afu *afu, int cr_idx, u64 offset,
474 			u16 *out)
475 {
476 	int rc;
477 	u64 val;
478 
479 	rc = _guest_afu_cr_readXX(2, afu, cr_idx, offset, &val);
480 	if (!rc)
481 		*out = (u16) val;
482 	return rc;
483 }
484 
485 static int guest_afu_cr_read8(struct cxl_afu *afu, int cr_idx, u64 offset,
486 			u8 *out)
487 {
488 	int rc;
489 	u64 val;
490 
491 	rc = _guest_afu_cr_readXX(1, afu, cr_idx, offset, &val);
492 	if (!rc)
493 		*out = (u8) val;
494 	return rc;
495 }
496 
/* 64-bit config-record read: result already full-width, pass straight through */
static int guest_afu_cr_read64(struct cxl_afu *afu, int cr_idx, u64 offset,
			u64 *out)
{
	return _guest_afu_cr_readXX(8, afu, cr_idx, offset, out);
}
502 
/* AFU configuration records are exposed read-only to the guest */
static int guest_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}
508 
/* AFU configuration records are exposed read-only to the guest */
static int guest_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}
514 
/* AFU configuration records are exposed read-only to the guest */
static int guest_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	/* config record is not writable from guest */
	return -EPERM;
}
520 
/*
 * Attach a context in AFU-directed mode: build a process element in a
 * zeroed page (the hcall requires 8-byte alignment within one 4K page),
 * hand it to the hypervisor, and record the returned process token and
 * per-process MMIO window.
 */
static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_process_element_hcall *elem;
	struct cxl *adapter = ctx->afu->adapter;
	const struct cred *cred;
	u32 pid, idx;
	int rc, r, i;
	u64 mmio_addr, mmio_size;
	__be64 flags = 0;

	/* Must be 8 byte aligned and cannot cross a 4096 byte boundary */
	if (!(elem = (struct cxl_process_element_hcall *)
			get_zeroed_page(GFP_KERNEL)))
		return -ENOMEM;

	elem->version = cpu_to_be64(CXL_PROCESS_ELEMENT_VERSION);
	if (ctx->kernel) {
		/* kernel contexts run privileged, with no associated pid */
		pid = 0;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		flags |= CXL_PE_PRIVILEGED_PROCESS;
		if (mfmsr() & MSR_SF)
			flags |= CXL_PE_64_BIT;
	} else {
		/* user contexts: problem state; privileged iff euid is root */
		pid = current->pid;
		flags |= CXL_PE_PROBLEM_STATE;
		flags |= CXL_PE_TRANSLATION_ENABLED;
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			flags |= CXL_PE_64_BIT;
		cred = get_current_cred();
		if (uid_eq(cred->euid, GLOBAL_ROOT_UID))
			flags |= CXL_PE_PRIVILEGED_PROCESS;
		put_cred(cred);
	}
	elem->flags         = cpu_to_be64(flags);
	elem->common.tid    = cpu_to_be32(0); /* Unused */
	elem->common.pid    = cpu_to_be32(pid);
	elem->common.csrp   = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp0  = cpu_to_be64(0); /* disable */
	elem->common.u.psl8.aurp1  = cpu_to_be64(0); /* disable */

	cxl_prefault(ctx, wed);

	/* segment table pointers set up by cxl_prefault()/context init */
	elem->common.u.psl8.sstp0  = cpu_to_be64(ctx->sstp0);
	elem->common.u.psl8.sstp1  = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have at least one interrupt allocated to take faults for
	 * kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		rc = afu_register_irqs(ctx, 0);
		if (rc)
			goto out_free;
	}

	/*
	 * First IRQ is the PSL fault interrupt; the rest are advertised
	 * to pHyp as a bitmap indexed relative to the adapter's IRQ base.
	 */
	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		for (i = 0; i < ctx->irqs.range[r]; i++) {
			if (r == 0 && i == 0) {
				elem->pslVirtualIsn = cpu_to_be32(ctx->irqs.offset[0]);
			} else {
				idx = ctx->irqs.offset[r] + i - adapter->guest->irq_base_offset;
				elem->applicationVirtualIsnBitmap[idx / 8] |= 0x80 >> (idx % 8);
			}
		}
	}
	elem->common.amr = cpu_to_be64(amr);
	elem->common.wed = cpu_to_be64(wed);

	/* keep IRQs quiet until the attach has succeeded */
	disable_afu_irqs(ctx);

	rc = cxl_h_attach_process(ctx->afu->guest->handle, elem,
				&ctx->process_token, &mmio_addr, &mmio_size);
	if (rc == H_SUCCESS) {
		if (ctx->master || !ctx->afu->pp_psa) {
			/* master (or no per-process PSA): use the full problem space */
			ctx->psn_phys = ctx->afu->psn_phys;
			ctx->psn_size = ctx->afu->adapter->ps_size;
		} else {
			ctx->psn_phys = mmio_addr;
			ctx->psn_size = mmio_size;
		}
		if (ctx->afu->pp_psa && mmio_size &&
			ctx->afu->pp_size == 0) {
			/*
			 * There's no property in the device tree to read the
			 * pp_size. We only find out at the 1st attach.
			 * Compared to bare-metal, it is too late and we
			 * should really lock here. However, on powerVM,
			 * pp_size is really only used to display in /sys.
			 * Being discussed with pHyp for their next release.
			 */
			ctx->afu->pp_size = mmio_size;
		}
		/* from PAPR: process element is bytes 4-7 of process token */
		ctx->external_pe = ctx->process_token & 0xFFFFFFFF;
		pr_devel("CXL pe=%i is known as %i for pHyp, mmio_size=%#llx",
			ctx->pe, ctx->external_pe, ctx->psn_size);
		ctx->pe_inserted = true;
		enable_afu_irqs(ctx);
	}

out_free:
	free_page((u64)elem);
	return rc;
}
625 
626 static int guest_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr)
627 {
628 	pr_devel("in %s\n", __func__);
629 
630 	ctx->kernel = kernel;
631 	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
632 		return attach_afu_directed(ctx, wed, amr);
633 
634 	/* dedicated mode not supported on FW840 */
635 
636 	return -EINVAL;
637 }
638 
639 static int detach_afu_directed(struct cxl_context *ctx)
640 {
641 	if (!ctx->pe_inserted)
642 		return 0;
643 	if (cxl_h_detach_process(ctx->afu->guest->handle, ctx->process_token))
644 		return -1;
645 	return 0;
646 }
647 
648 static int guest_detach_process(struct cxl_context *ctx)
649 {
650 	pr_devel("in %s\n", __func__);
651 	trace_cxl_detach(ctx);
652 
653 	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
654 		return -EIO;
655 
656 	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
657 		return detach_afu_directed(ctx);
658 
659 	return -EINVAL;
660 }
661 
662 static void guest_release_afu(struct device *dev)
663 {
664 	struct cxl_afu *afu = to_cxl_afu(dev);
665 
666 	pr_devel("%s\n", __func__);
667 
668 	idr_destroy(&afu->contexts_idr);
669 
670 	kfree(afu->guest);
671 	kfree(afu);
672 }
673 
/* Read up to 'len' bytes of an AFU's VPD; returns bytes copied or -errno */
ssize_t cxl_guest_read_afu_vpd(struct cxl_afu *afu, void *buf, size_t len)
{
	return guest_collect_vpd(NULL, afu, buf, len);
}
678 
#define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
/*
 * Read the AFU error buffer via hcall into a bounce page, then copy
 * at most one page back to the caller.
 *
 * NOTE(review): only the low 3 bits of 'off' reach the hcall, and
 * 'count' is clamped only *after* the hcall has been issued with the
 * unclamped value — verify both against the PAPR H_GET_AFU_ERR spec.
 * Also returns 0 (not bytes copied) on success; callers appear to
 * rely on that.
 */
static ssize_t guest_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
					loff_t off, size_t count)
{
	void *tbuf = NULL;
	int rc = 0;

	tbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!tbuf)
		return -ENOMEM;

	rc = cxl_h_get_afu_err(afu->guest->handle,
			       off & 0x7,
			       virt_to_phys(tbuf),
			       count);
	if (rc)
		goto err;

	/* never copy more than the bounce page holds */
	if (count > ERR_BUFF_MAX_COPY_SIZE)
		count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
	memcpy(buf, tbuf, count);
err:
	free_page((u64)tbuf);

	return rc;
}
705 
/* No-op on powerVM: the hypervisor keeps the AFU enabled on our behalf */
static int guest_afu_check_and_enable(struct cxl_afu *afu)
{
	return 0;
}
710 
711 static bool guest_support_attributes(const char *attr_name,
712 				     enum cxl_attrs type)
713 {
714 	switch (type) {
715 	case CXL_ADAPTER_ATTRS:
716 		if ((strcmp(attr_name, "base_image") == 0) ||
717 			(strcmp(attr_name, "load_image_on_perst") == 0) ||
718 			(strcmp(attr_name, "perst_reloads_same_image") == 0) ||
719 			(strcmp(attr_name, "image_loaded") == 0))
720 			return false;
721 		break;
722 	case CXL_AFU_MASTER_ATTRS:
723 		if ((strcmp(attr_name, "pp_mmio_off") == 0))
724 			return false;
725 		break;
726 	case CXL_AFU_ATTRS:
727 		break;
728 	default:
729 		break;
730 	}
731 
732 	return true;
733 }
734 
735 static int activate_afu_directed(struct cxl_afu *afu)
736 {
737 	int rc;
738 
739 	dev_info(&afu->dev, "Activating AFU(%d) directed mode\n", afu->slice);
740 
741 	afu->current_mode = CXL_MODE_DIRECTED;
742 
743 	afu->num_procs = afu->max_procs_virtualised;
744 
745 	if ((rc = cxl_chardev_m_afu_add(afu)))
746 		return rc;
747 
748 	if ((rc = cxl_sysfs_afu_m_add(afu)))
749 		goto err;
750 
751 	if ((rc = cxl_chardev_s_afu_add(afu)))
752 		goto err1;
753 
754 	return 0;
755 err1:
756 	cxl_sysfs_afu_m_remove(afu);
757 err:
758 	cxl_chardev_afu_remove(afu);
759 	return rc;
760 }
761 
762 static int guest_afu_activate_mode(struct cxl_afu *afu, int mode)
763 {
764 	if (!mode)
765 		return 0;
766 	if (!(mode & afu->modes_supported))
767 		return -EINVAL;
768 
769 	if (mode == CXL_MODE_DIRECTED)
770 		return activate_afu_directed(afu);
771 
772 	if (mode == CXL_MODE_DEDICATED)
773 		dev_err(&afu->dev, "Dedicated mode not supported\n");
774 
775 	return -EINVAL;
776 }
777 
/*
 * Tear down directed mode: remove the char/sysfs interfaces first so
 * no new contexts arrive, then reset the AFU via the backend op.
 */
static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU(%d) directed mode\n", afu->slice);

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	cxl_ops->afu_reset(afu);

	return 0;
}
792 
793 static int guest_afu_deactivate_mode(struct cxl_afu *afu, int mode)
794 {
795 	if (!mode)
796 		return 0;
797 	if (!(mode & afu->modes_supported))
798 		return -EINVAL;
799 
800 	if (mode == CXL_MODE_DIRECTED)
801 		return deactivate_afu_directed(afu);
802 	return 0;
803 }
804 
/* Ask the hypervisor to reset this AFU */
static int guest_afu_reset(struct cxl_afu *afu)
{
	pr_devel("AFU(%d) reset request\n", afu->slice);
	return cxl_h_reset_afu(afu->guest->handle);
}
810 
811 static int guest_map_slice_regs(struct cxl_afu *afu)
812 {
813 	if (!(afu->p2n_mmio = ioremap(afu->guest->p2n_phys, afu->guest->p2n_size))) {
814 		dev_err(&afu->dev, "Error mapping AFU(%d) MMIO regions\n",
815 			afu->slice);
816 		return -ENOMEM;
817 	}
818 	return 0;
819 }
820 
821 static void guest_unmap_slice_regs(struct cxl_afu *afu)
822 {
823 	if (afu->p2n_mmio)
824 		iounmap(afu->p2n_mmio);
825 }
826 
/*
 * Poll-driven AFU state machine (run from the work_err delayed work).
 * Reads the current error state and, on a transition, drives the PCI
 * error-recovery callbacks and the AFU reset.
 * Returns 0 when the state is unchanged or handled, -EINVAL for an
 * unknown state, or an hcall error.
 */
static int afu_update_state(struct cxl_afu *afu)
{
	int rc, cur_state;

	rc = afu_read_error_state(afu, &cur_state);
	if (rc)
		return rc;

	/* no transition: nothing to do */
	if (afu->guest->previous_state == cur_state)
		return 0;

	pr_devel("AFU(%d) update state to %#x\n", afu->slice, cur_state);

	switch (cur_state) {
	case H_STATE_NORMAL:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_DISABLE:
		/* freeze drivers, drop all contexts, then try a reset */
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_frozen);

		cxl_context_detach_all(afu);
		if ((rc = cxl_ops->afu_reset(afu)))
			pr_devel("reset hcall failed %d\n", rc);

		/* re-read: only report recovery if the reset worked */
		rc = afu_read_error_state(afu, &cur_state);
		if (!rc && cur_state == H_STATE_NORMAL) {
			pci_error_handlers(afu, CXL_SLOT_RESET_EVENT,
					pci_channel_io_normal);
			pci_error_handlers(afu, CXL_RESUME_EVENT, 0);
		}
		/* force re-evaluation on the next poll */
		afu->guest->previous_state = 0;
		break;

	case H_STATE_TEMP_UNAVAILABLE:
		afu->guest->previous_state = cur_state;
		break;

	case H_STATE_PERM_UNAVAILABLE:
		dev_err(&afu->dev, "AFU is in permanent error state\n");
		pci_error_handlers(afu, CXL_ERROR_DETECTED_EVENT,
				pci_channel_io_perm_failure);
		afu->guest->previous_state = cur_state;
		break;

	default:
		pr_err("Unexpected AFU(%d) error state: %#x\n",
		       afu->slice, cur_state);
		return -EINVAL;
	}

	return rc;
}
881 
/*
 * Delayed-work callback that polls the AFU error state every 3s.
 * Stops rescheduling itself either when the AFU is permanently
 * unavailable or when handle_err has been cleared (teardown).
 */
static void afu_handle_errstate(struct work_struct *work)
{
	struct cxl_afu_guest *afu_guest =
		container_of(to_delayed_work(work), struct cxl_afu_guest, work_err);

	/* give up for good on a clean read that says PERM_UNAVAILABLE */
	if (!afu_update_state(afu_guest->parent) &&
	    afu_guest->previous_state == H_STATE_PERM_UNAVAILABLE)
		return;

	if (afu_guest->handle_err)
		schedule_delayed_work(&afu_guest->work_err,
				      msecs_to_jiffies(3000));
}
895 
896 static bool guest_link_ok(struct cxl *cxl, struct cxl_afu *afu)
897 {
898 	int state;
899 
900 	if (afu && (!afu_read_error_state(afu, &state))) {
901 		if (state == H_STATE_NORMAL)
902 			return true;
903 	}
904 
905 	return false;
906 }
907 
/* Sanity-check device-tree derived AFU properties before bringing it up */
static int afu_properties_look_ok(struct cxl_afu *afu)
{
	if (afu->pp_irqs < 0) {
		dev_err(&afu->dev, "Unexpected per-process minimum interrupt value\n");
		return -EINVAL;
	}

	if (afu->max_procs_virtualised < 1) {
		dev_err(&afu->dev, "Unexpected max number of processes virtualised value\n");
		return -EINVAL;
	}

	return 0;
}
922 
/*
 * Create and bring up one AFU slice from its device-tree node.
 *
 * Error handling is two-phased: before cxl_register_afu() the afu can
 * be freed directly ('free' stays true); after registration the device
 * core owns the object and freeing happens through guest_release_afu()
 * when device_unregister() drops the last reference.
 */
int cxl_guest_init_afu(struct cxl *adapter, int slice, struct device_node *afu_np)
{
	struct cxl_afu *afu;
	bool free = true;
	int rc;

	pr_devel("in %s - AFU(%d)\n", __func__, slice);
	if (!(afu = cxl_alloc_afu(adapter, slice)))
		return -ENOMEM;

	if (!(afu->guest = kzalloc(sizeof(struct cxl_afu_guest), GFP_KERNEL))) {
		kfree(afu);
		return -ENOMEM;
	}

	if ((rc = dev_set_name(&afu->dev, "afu%i.%i",
					  adapter->adapter_num,
					  slice)))
		goto err1;

	adapter->slices++;

	if ((rc = cxl_of_read_afu_handle(afu, afu_np)))
		goto err1;

	/* start from a clean slate before reading properties */
	if ((rc = cxl_ops->afu_reset(afu)))
		goto err1;

	if ((rc = cxl_of_read_afu_properties(afu, afu_np)))
		goto err1;

	if ((rc = afu_properties_look_ok(afu)))
		goto err1;

	if ((rc = guest_map_slice_regs(afu)))
		goto err1;

	if ((rc = guest_register_serr_irq(afu)))
		goto err2;

	/*
	 * After we call this function we must not free the afu directly, even
	 * if it returns an error!
	 */
	if ((rc = cxl_register_afu(afu)))
		goto err_put1;

	if ((rc = cxl_sysfs_afu_add(afu)))
		goto err_put1;

	/*
	 * pHyp doesn't expose the programming models supported by the
	 * AFU. pHyp currently only supports directed mode. If it adds
	 * dedicated mode later, this version of cxl has no way to
	 * detect it. So we'll initialize the driver, but the first
	 * attach will fail.
	 * Being discussed with pHyp to do better (likely new property)
	 */
	if (afu->max_procs_virtualised == 1)
		afu->modes_supported = CXL_MODE_DEDICATED;
	else
		afu->modes_supported = CXL_MODE_DIRECTED;

	if ((rc = cxl_afu_select_best_mode(afu)))
		goto err_put2;

	adapter->afu[afu->slice] = afu;

	afu->enabled = true;

	/*
	 * wake up the cpu periodically to check the state
	 * of the AFU using "afu" stored in the guest structure.
	 */
	afu->guest->parent = afu;
	afu->guest->handle_err = true;
	INIT_DELAYED_WORK(&afu->guest->work_err, afu_handle_errstate);
	schedule_delayed_work(&afu->guest->work_err, msecs_to_jiffies(1000));

	/* vPHB failure is not fatal: AFU works without PCI pass-through */
	if ((rc = cxl_pci_vphb_add(afu)))
		dev_info(&afu->dev, "Can't register vPHB\n");

	return 0;

err_put2:
	cxl_sysfs_afu_remove(afu);
err_put1:
	/* device core owns the afu now; release happens via guest_release_afu */
	device_unregister(&afu->dev);
	free = false;
	guest_release_serr_irq(afu);
err2:
	guest_unmap_slice_regs(afu);
err1:
	if (free) {
		kfree(afu->guest);
		kfree(afu);
	}
	return rc;
}
1022 
/*
 * Tear down an AFU: stop the error-poll work first, then detach users,
 * drop mode/IRQ/MMIO resources, and finally unregister the device
 * (which frees the afu via guest_release_afu()).
 */
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
	if (!afu)
		return;

	/* flush and stop pending job */
	afu->guest->handle_err = false;
	flush_delayed_work(&afu->guest->work_err);

	cxl_pci_vphb_remove(afu);
	cxl_sysfs_afu_remove(afu);

	/* unlink from the adapter before detaching contexts */
	spin_lock(&afu->adapter->afu_list_lock);
	afu->adapter->afu[afu->slice] = NULL;
	spin_unlock(&afu->adapter->afu_list_lock);

	cxl_context_detach_all(afu);
	cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
	guest_release_serr_irq(afu);
	guest_unmap_slice_regs(afu);

	device_unregister(&afu->dev);
}
1046 
1047 static void free_adapter(struct cxl *adapter)
1048 {
1049 	struct irq_avail *cur;
1050 	int i;
1051 
1052 	if (adapter->guest) {
1053 		if (adapter->guest->irq_avail) {
1054 			for (i = 0; i < adapter->guest->irq_nranges; i++) {
1055 				cur = &adapter->guest->irq_avail[i];
1056 				kfree(cur->bitmap);
1057 			}
1058 			kfree(adapter->guest->irq_avail);
1059 		}
1060 		kfree(adapter->guest->status);
1061 		kfree(adapter->guest);
1062 	}
1063 	cxl_remove_adapter_nr(adapter);
1064 	kfree(adapter);
1065 }
1066 
1067 static int properties_look_ok(struct cxl *adapter)
1068 {
1069 	/* The absence of this property means that the operational
1070 	 * status is unknown or okay
1071 	 */
1072 	if (strlen(adapter->guest->status) &&
1073 	    strcmp(adapter->guest->status, "okay")) {
1074 		pr_err("ABORTING:Bad operational status of the device\n");
1075 		return -EINVAL;
1076 	}
1077 
1078 	return 0;
1079 }
1080 
/* Read up to 'len' bytes of the adapter's VPD; returns bytes copied or -errno */
ssize_t cxl_guest_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
{
	return guest_collect_vpd(adapter, NULL, buf, len);
}
1085 
/*
 * Tear down the adapter's user-visible interfaces; the adapter itself
 * is freed by release_adapter() when the device reference drops.
 */
void cxl_guest_remove_adapter(struct cxl *adapter)
{
	pr_devel("in %s\n", __func__);

	cxl_sysfs_adapter_remove(adapter);

	cxl_guest_remove_chardev(adapter);
	device_unregister(&adapter->dev);
}
1095 
/* device release callback: runs when the last reference to the adapter drops */
static void release_adapter(struct device *dev)
{
	free_adapter(to_cxl_adapter(dev));
}
1100 
/*
 * Allocate and initialize the adapter from its device-tree node.
 *
 * Like cxl_guest_init_afu(), error handling is two-phased: before
 * cxl_register_adapter() the adapter may be freed directly ('free'
 * stays true); afterwards the device core owns it and freeing happens
 * via release_adapter() from device_unregister().
 */
struct cxl *cxl_guest_init_adapter(struct device_node *np, struct platform_device *pdev)
{
	struct cxl *adapter;
	bool free = true;
	int rc;

	if (!(adapter = cxl_alloc_adapter()))
		return ERR_PTR(-ENOMEM);

	if (!(adapter->guest = kzalloc(sizeof(struct cxl_guest), GFP_KERNEL))) {
		free_adapter(adapter);
		return ERR_PTR(-ENOMEM);
	}

	adapter->slices = 0;
	adapter->guest->pdev = pdev;
	adapter->dev.parent = &pdev->dev;
	adapter->dev.release = release_adapter;
	dev_set_drvdata(&pdev->dev, adapter);

	/*
	 * Hypervisor controls PSL timebase initialization (p1 register).
	 * On FW840, PSL is initialized.
	 */
	adapter->psl_timebase_synced = true;

	if ((rc = cxl_of_read_adapter_handle(adapter, np)))
		goto err1;

	if ((rc = cxl_of_read_adapter_properties(adapter, np)))
		goto err1;

	if ((rc = properties_look_ok(adapter)))
		goto err1;

	if ((rc = cxl_guest_add_chardev(adapter)))
		goto err1;

	/*
	 * After we call this function we must not free the adapter directly,
	 * even if it returns an error!
	 */
	if ((rc = cxl_register_adapter(adapter)))
		goto err_put1;

	if ((rc = cxl_sysfs_adapter_add(adapter)))
		goto err_put1;

	/* release the context lock as the adapter is configured */
	cxl_adapter_context_unlock(adapter);

	return adapter;

err_put1:
	/* device core owns the adapter now; freed via release_adapter() */
	device_unregister(&adapter->dev);
	free = false;
	cxl_guest_remove_chardev(adapter);
err1:
	if (free)
		free_adapter(adapter);
	return ERR_PTR(rc);
}
1163 
/*
 * Recycle the adapter: tear it down completely, then re-probe the
 * same platform device from scratch.
 */
void cxl_guest_reload_module(struct cxl *adapter)
{
	struct platform_device *pdev;

	/* save the pdev; remove_adapter frees the cxl struct */
	pdev = adapter->guest->pdev;
	cxl_guest_remove_adapter(adapter);

	cxl_of_probe(pdev);
}
1173 
/*
 * Backend ops for running under a powerVM hypervisor: privileged (p1)
 * register access is replaced by hcalls. NULL entries (setup_irq,
 * update_ivtes) are operations this backend does not provide.
 */
const struct cxl_backend_ops cxl_guest_ops = {
	.module = THIS_MODULE,
	.adapter_reset = guest_reset,
	.alloc_one_irq = guest_alloc_one_irq,
	.release_one_irq = guest_release_one_irq,
	.alloc_irq_ranges = guest_alloc_irq_ranges,
	.release_irq_ranges = guest_release_irq_ranges,
	.setup_irq = NULL,
	.handle_psl_slice_error = guest_handle_psl_slice_error,
	.psl_interrupt = guest_psl_irq,
	.ack_irq = guest_ack_irq,
	.attach_process = guest_attach_process,
	.detach_process = guest_detach_process,
	.update_ivtes = NULL,
	.support_attributes = guest_support_attributes,
	.link_ok = guest_link_ok,
	.release_afu = guest_release_afu,
	.afu_read_err_buffer = guest_afu_read_err_buffer,
	.afu_check_and_enable = guest_afu_check_and_enable,
	.afu_activate_mode = guest_afu_activate_mode,
	.afu_deactivate_mode = guest_afu_deactivate_mode,
	.afu_reset = guest_afu_reset,
	.afu_cr_read8 = guest_afu_cr_read8,
	.afu_cr_read16 = guest_afu_cr_read16,
	.afu_cr_read32 = guest_afu_cr_read32,
	.afu_cr_read64 = guest_afu_cr_read64,
	.afu_cr_write8 = guest_afu_cr_write8,
	.afu_cr_write16 = guest_afu_cr_write16,
	.afu_cr_write32 = guest_afu_cr_write32,
	.read_adapter_vpd = cxl_guest_read_adapter_vpd,
};
1205