xref: /openbmc/linux/drivers/misc/cxl/native.c (revision 9dae47aba0a055f761176d9297371d5bb24289ec)
/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}
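
/*
 * Illustrative note on the two wrappers above: afu_enable() sets
 * CXL_AFU_Cntl_An_E and polls for the Enabled state, while
 * cxl_afu_disable() clears that same bit (via afu_control()'s 'clear'
 * argument) and polls for Disabled; both are just parameterisations of
 * the afu_control() polling loop.
 */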

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	int rc;
	u64 serr;

	pr_devel("AFU reset request\n");

	rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);

	/*
	 * Re-enable any masked interrupts when the AFU is not
	 * activated to avoid side effects after attaching a process
	 * in dedicated mode.
	 */
	if (afu->current_mode == 0) {
		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	}

	return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	u64 trans_fault = 0x0ULL;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (cxl_is_power8())
		trans_fault = CXL_PSL_DSISR_TRANS;
	if (cxl_is_power9())
		trans_fault = CXL_PSL9_DSISR_An_TF;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL &  CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n",
				     PSL_CNTL, dsisr);

		if (dsisr & trans_fault) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
				   dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
				   dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
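
/*
 * Worked example of the formula above (illustrative): we need
 * sizeof(SPA) <= spa_size, i.e.
 *    ((n+4) * 128) + (n*8) + 256 <= spa_size
 *    136*n + 768 <= spa_size
 *    n <= (spa_size - 768) / 136 = ((spa_size / 8) - 96) / 17
 * So a hypothetical 4096 byte SPA supports n = (512 - 96) / 17 = 24
 * processes; check: ((24+4) * 128) + (24*8) + 256 = 4032 <= 4096.
 */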

static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
					afu->native->spa_max_procs, afu->native->spa_size);
			if (mode != CXL_MODE_DEDICATED)
				afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					    ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		afu->native->spa, afu->native->spa_max_procs,
		afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}
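
/*
 * Illustrative note on the size encoding above (reasoned from the
 * expression itself, not from the CAIA text): once masked with
 * CXL_PSL_SPAP_Size, (spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1
 * reduces to (spa_size / 4096 - 1) << CXL_PSL_SPAP_Size_Shift, because
 * the "- 1" only borrows through the low bits that the mask discards.
 * So a hypothetical 64K SPA (sixteen 4K pages) programs a size field
 * of 15.
 */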

static inline void detach_spa(struct cxl_afu *afu)
{
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			afu->native->spa_order);
		afu->native->spa = NULL;
	}
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	u64 ierat;

	pr_devel("CXL adapter - invalidation of all ERAT entries\n");

	/* Invalidates all ERAT entries for Radix or HPT */
	ierat = CXL_XSL9_IERAT_IALL;
	if (radix_enabled())
		ierat |= CXL_XSL9_IERAT_INVR;
	cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

	while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev,
			"WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("Flushing data cache\n");

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
			((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
			be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK  | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs.  Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service.  Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();

	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}
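
/*
 * Illustrative sketch of the completion test in do_process_element_cmd()
 * (field layout per the CXL_SPA_SW_* masks in cxl.h): sw_command_status
 * packs a command field, a state field 16 bits below it, and the link/PE
 * field. We seed it with (cmd | ctx->pe); the PSL acknowledges by echoing
 * the command code into the state field, so a completed command reads
 * back as cmd | (cmd >> 16) | ctx->pe, while ~0ULL signals outright
 * failure and anything else means the command is still in flight.
 */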

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	if (cxl_is_power8())
		slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}
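
/*
 * Illustrative example of the slave mapping above (values assumed, not
 * taken from real hardware): with pp_offset 0x10000, pp_size 0x1000 and
 * pe 3, a non-master context is given the 0x1000 byte window starting at
 * psn_phys + 0x10000 + 0x1000 * 3; master contexts, and AFUs without a
 * per-process problem space, map the whole problem space instead.
 */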

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	if (cxl_is_power8())
		cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
	u64 sr = 0;

	set_endian(sr);
	if (master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;
	if (kernel) {
		if (!real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_HV;
		else
			sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	if (p9) {
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_XLAT_ror;
		else
			sr |= CXL_PSL_SR_An_XLAT_hpt;
	}
	return sr;
}
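
/*
 * Illustrative summary of the bits chosen above (derived from the code
 * itself): kernel contexts run in hypervisor state (HV) and take 64-bit
 * mode from the current MSR, gaining relocation (R) unless real_mode is
 * requested; user contexts always get problem state (PR) and relocation,
 * take 64-bit mode (SF) from the thread flags, and HV tracks radix.  On
 * POWER9 the XLAT field additionally selects radix or HPT translation.
 */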

static u64 calculate_sr(struct cxl_context *ctx)
{
	return cxl_calculate_sr(ctx->master, ctx->kernel, ctx->real_mode,
				cxl_is_power9());
}

static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */

	if (ctx->kernel)
		pid = 0;
	else {
		if (ctx->mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				__func__, ctx->pe, pid_nr(ctx->pid));
			return -EINVAL;
		}
		pid = ctx->mm->context.id;
	}

	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */

	cxl_prefault(ctx, wed);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	update_ivtes_directed(ctx);

	/* first guy needs to enable */
	result = cxl_ops->afu_check_and_enable(ctx->afu);
	if (result)
		return result;

	return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
	ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	/*
	 * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
	 * XSL and AFU are programmed to work with a single context.
	 * The context information should be configured in the SPA area
	 * index 0 (so PSL_SPAP must be configured before enabling the
	 * AFU).
	 */
	afu->num_procs = 1;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DEDICATED;

	return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}
}

void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
			((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
			((u64)ctx->irqs.range[3] & 0xffff));
}
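
/*
 * Illustrative layout of the two writes above: each register packs the
 * four IVTE entries as 16 bit fields, entry 0 in the top half-word down
 * to entry 3 in the bottom.  E.g. hypothetical offsets {0x100, 0, 0, 0}
 * and ranges {1, 0, 0, 0} program CXL_PSL_IVTE_Offset_An with
 * 0x0100000000000000 and CXL_PSL_IVTE_Limit_An with 0x0001000000000000.
 */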

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
	/*
	 * Ideally we should do a wmb() here to make sure the changes to the
	 * PE are visible to the card before we call afu_enable.
	 * On ppc64, though, all MMIOs are preceded by a 'sync' instruction,
	 * hence we don't need one here.
	 */

	result = cxl_ops->afu_reset(afu);
	if (result)
		return result;

	return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if ((mode == CXL_MODE_DEDICATED) &&
	    (afu->adapter->native->sl_ops->activate_dedicated_process))
		return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

	return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
				u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
		return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
		return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
		return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	if (cxl_is_power8())
		info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
	u64 fir1, serr;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						u64 dsisr, u64 errstat)
{

	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
		return true;

	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
		return true;

	return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (cxl_is_translation_fault(afu, irq_info->dsisr))
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
	int ph, ret = IRQ_HANDLED, res;

	/* check if eeh kicked in while the interrupt was in flight */
	if (unlikely(phreg == ~0ULL)) {
		dev_warn(&afu->dev,
			 "Ignoring slice interrupt(%d) due to fenced card",
			 irq);
		return IRQ_HANDLED;
	}
	/* Mask the pe-handle from register value */
	ph = phreg & 0xffff;
	if ((res = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
		if (afu->adapter->native->sl_ops->fail_irq)
			return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
		return ret;
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		if (afu->adapter->native->sl_ops->handle_interrupt)
			ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	if (afu->adapter->native->sl_ops->fail_irq)
		ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
	return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if (cxl_is_power8() &&
		   ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
			return;
		if (cxl_is_power9() &&
		   ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so we need to let it run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
	return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 errstat, serr, afu_error, dsisr;
	u64 fir_slice, afu_debug, irq_mask;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);

	if (cxl_is_power8()) {
		fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
		afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
		dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
		dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	}
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	/* mask off the IRQ so it won't retrigger until the AFU is reset */
	irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
	serr |= irq_mask;
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

	return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
{
	u64 fir1;

	fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
	dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
}

void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
	dev_crit(&adapter->dev,
		 "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
		 fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq == 0 ||
	    adapter->native->err_virq !=
	    irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
	adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (cxl_is_power8())
		serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	if (cxl_is_power9()) {
		/*
		 * By default, all errors are masked, so don't set all the
		 * mask bits. Slice errors will be transferred.
		 */
		serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
	}
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq == 0 ||
	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
	afu->serr_virq = 0;
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				    afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				    afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq == 0 ||
	    afu->native->psl_virq !=
	    irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
	afu->native->psl_virq = 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		(cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		(cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}
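
/*
 * Illustrative walk-through of the sub-word accessors above: the AFU
 * configuration record is accessed here with aligned little-endian 32 bit
 * MMIO reads, so a 16 or 8 bit read becomes an aligned 32 bit read plus a
 * shift.  E.g. a hypothetical read16 at off 6 reads the word at off 4 and
 * extracts bits 16-31 ((6 & 0x3) * 8 == 16); the write16/write8 helpers
 * below do the inverse as a read-modify-write of the same aligned word.
 */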

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		(cr * afu->crs_len) + off, in);
	return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};