xref: /openbmc/linux/drivers/misc/cxl/native.c (revision fe329452)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

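/*
 * afu_control: send a command to the AFU control register and poll, under
 * afu_cntl_lock, until the masked value of CXL_AFU_Cntl_An reaches the
 * expected result. Returns -EBUSY on timeout and -EIO if the PSL link
 * drops while waiting.
 */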
static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	int rc;
	u64 serr;

	pr_devel("AFU reset request\n");

	rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			   CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			   false);

	/*
	 * Re-enable any masked interrupts when the AFU is not
	 * activated to avoid side effects after attaching a process
	 * in dedicated mode.
	 */
	if (afu->current_mode == 0) {
		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	}

	return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

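/*
 * cxl_psl_purge: drain outstanding PSL transactions for this AFU. The purge
 * is started by setting CXL_PSL_SCNTL_An_Pc and polled until it leaves the
 * pending state; translation faults seen while purging are terminated and
 * other pending faults acknowledged so that the purge can complete.
 */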
int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	u64 trans_fault = 0x0ULL;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (cxl_is_power8())
		trans_fault = CXL_PSL_DSISR_TRANS;
	if (cxl_is_power9())
		trans_fault = CXL_PSL9_DSISR_An_TF;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx  PSL_DSISR: 0x%016llx\n",
				     PSL_CNTL, dsisr);

		if (dsisr & trans_fault) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
				   dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
				   dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		       PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 *    end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 *    end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 *    end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 *    sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
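	/*
	 * For example, a 4K SPA gives ((4096 / 8) - 96) / 17 = 24 process
	 * elements, while a 64K SPA gives 476.
	 */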
	return ((spa_size / 8) - 96) / 17;
}

static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
					afu->native->spa_max_procs, afu->native->spa_size);
			if (mode != CXL_MODE_DEDICATED)
				afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i   afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}

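/*
 * attach_spa: point the PSL at the scheduled process area by writing its
 * physical address, encoded size and the valid bit to CXL_PSL_SPAP_An, and
 * record where the software command status word lives within that area.
 */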
static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					    ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		afu->native->spa, afu->native->spa_max_procs,
		afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			afu->native->spa_order);
		afu->native->spa = NULL;
	}
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	u64 ierat;

	pr_devel("CXL adapter - invalidation of all ERAT entries\n");

	/* Invalidates all ERAT entries for Radix or HPT */
	ierat = CXL_XSL9_IERAT_IALL;
	if (radix_enabled())
		ierat |= CXL_XSL9_IERAT_INVR;
	cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

	while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev,
			"WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

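/*
 * cxl_invalidate_all_psl8: adapter wide TLB and SLB invalidation for PSL8.
 * Each invalidation is kicked off through its register and polled for
 * completion with the usual timeout and link checks.
 */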
int cxl_invalidate_all_psl8(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

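/*
 * cxl_data_cache_flush: request a PSL data cache flush by setting
 * CXL_PSL_Control_Fr and polling the flush status field until it reports
 * completion. Skipped entirely on adapters without a data cache.
 */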
int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	/*
	 * Do a data cache flush only if a data cache is available.
	 * The PSL9D has no data cache, so the flush operation would
	 * simply time out there.
	 */
	if (adapter->native->no_data_cache) {
		pr_devel("No PSL data cache. Ignoring cache flush req.\n");
		return 0;
	}

	pr_devel("Flushing data cache\n");
	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}

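/*
 * cxl_write_sstp: update the segment table pointers for an AFU, following
 * the numbered sequence below: invalidate SSTP1, flush the SLB, then write
 * the new SSTP0/SSTP1 values.
 */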
static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using per slice version may improve performance here. (ie. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
			((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
			be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

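/*
 * do_process_element_cmd: issue a linked list command (add, terminate or
 * remove) for this context's process element and wait for the PSL to echo
 * the command back through the software command status word in the SPA.
 */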
static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs.  Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service.  Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();

	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	if (cxl_is_power8())
		slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}

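/*
 * cxl_assign_psn_space: work out which part of the problem state area a
 * context may mmap: masters (and AFUs with no per-process area) get the
 * full region, other contexts get their own pp_size window at
 * pp_offset + pp_size * pe.
 */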
void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}

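/*
 * activate_afu_directed: switch the AFU into AFU directed mode: allocate
 * and attach the SPA if necessary, program the PSL mode registers and
 * create the master/shared character devices and sysfs entries.
 */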
static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	if (cxl_is_power8())
		cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

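/*
 * cxl_calculate_sr: build the PSL State Register value for a context,
 * accounting for endianness, master and kernel contexts, radix vs hash
 * translation and the PSL9 translation mode bits.
 */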
u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
	u64 sr = 0;

	set_endian(sr);
	if (master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;

	if (kernel) {
		if (!real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_HV;
		else
			sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	if (p9) {
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_XLAT_ror;
		else
			sr |= CXL_PSL_SR_An_XLAT_hpt;
	}
	return sr;
}

static u64 calculate_sr(struct cxl_context *ctx)
{
	return cxl_calculate_sr(ctx->master, ctx->kernel, false,
				cxl_is_power9());
}

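/*
 * update_ivtes_directed: refresh the interrupt vector table entries in the
 * process element. If the context is already started, the PE is terminated
 * and removed first, then re-added once the new ranges are in place.
 */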
static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}

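/*
 * process_element_entry_psl9: populate the PSL9 process element for a
 * context: PID (0 for kernel contexts), optional TIDR assignment, state
 * register, AMR and WED, plus a fallback IVTE range so the multiplexed PSL
 * interrupt can always deliver faults.
 */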
static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int rc;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */

	if (ctx->kernel)
		pid = 0;
	else {
		if (ctx->mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				__func__, ctx->pe, pid_nr(ctx->pid));
			return -EINVAL;
		}
		pid = ctx->mm->context.id;
	}

	/* Assign a unique TIDR (thread id) for the current thread */
	if (!(ctx->tidr) && (ctx->assign_tidr)) {
		rc = set_thread_tidr(current);
		if (rc)
			return -ENODEV;
		ctx->tidr = current->thread.tidr;
		pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
	}

	ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */

	cxl_prefault(ctx, wed);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	update_ivtes_directed(ctx);

	/* first guy needs to enable */
	result = cxl_ops->afu_check_and_enable(ctx->afu);
	if (result)
		return result;

	return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
	ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	/*
	 * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
	 * XSL and AFU are programmed to work with a single context.
	 * The context information should be configured in the SPA area
	 * index 0 (so PSL_SPAP must be configured before enabling the
	 * AFU).
	 */
	afu->num_procs = 1;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DEDICATED;

	return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0);    /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0);       /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0);        /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0);       /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0);       /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}
}

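/*
 * cxl_update_dedicated_ivtes_psl8: in dedicated mode the PSL8 takes the
 * IVTE offsets and ranges directly in a pair of per-slice registers, four
 * 16 bit fields packed into each, rather than through the process element.
 */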
void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		       (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
			((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		       (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		       (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		       (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
			((u64)ctx->irqs.range[3] & 0xffff));
}

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
	/*
	 * Ideally we should do a wmb() here to make sure the changes to the
	 * PE are visible to the card before we call afu_enable.
	 * On ppc64 though all mmios are preceded by a 'sync' instruction,
	 * hence we don't need one here.
	 */

	result = cxl_ops->afu_reset(afu);
	if (result)
		return result;

	return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if ((mode == CXL_MODE_DEDICATED) &&
	    (afu->adapter->native->sl_ops->activate_dedicated_process))
		return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

	return -EINVAL;
}

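/*
 * native_attach_process: entry point used by the cxl core to attach a
 * context, dispatching to the AFU directed or dedicated process attach
 * routine provided by the PSL/XSL specific sl_ops.
 */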
native_attach_process(struct cxl_context * ctx,bool kernel,u64 wed,u64 amr)10042b04cf31SFrederic Barrat static int native_attach_process(struct cxl_context *ctx, bool kernel,
10052b04cf31SFrederic Barrat 				u64 wed, u64 amr)
1006f204e0b8SIan Munsie {
10070d400f77SChristophe Lombard 	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
10080b3f9c75SDaniel Axtens 		WARN(1, "Device link is down, refusing to attach process!\n");
10090b3f9c75SDaniel Axtens 		return -EIO;
10100b3f9c75SDaniel Axtens 	}
10110b3f9c75SDaniel Axtens 
1012f204e0b8SIan Munsie 	ctx->kernel = kernel;
1013bdd2e715SChristophe Lombard 	if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
1014bdd2e715SChristophe Lombard 	    (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
1015bdd2e715SChristophe Lombard 		return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);
1016f204e0b8SIan Munsie 
1017bdd2e715SChristophe Lombard 	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
1018bdd2e715SChristophe Lombard 	    (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
1019bdd2e715SChristophe Lombard 		return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);
1020f204e0b8SIan Munsie 
1021f204e0b8SIan Munsie 	return -EINVAL;
1022f204e0b8SIan Munsie }
1023f204e0b8SIan Munsie 
detach_process_native_dedicated(struct cxl_context * ctx)1024f204e0b8SIan Munsie static inline int detach_process_native_dedicated(struct cxl_context *ctx)
1025f204e0b8SIan Munsie {
10265e7823c9SIan Munsie 	/*
10275e7823c9SIan Munsie 	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
10285e7823c9SIan Munsie 	 * stop the AFU in dedicated mode (we therefore do not make that
10295e7823c9SIan Munsie 	 * optional like we do in the afu directed path). It does not indicate
10305e7823c9SIan Munsie 	 * that we need to do an explicit disable (which should occur
10315e7823c9SIan Munsie 	 * implicitly as part of the reset) or purge, but we do these as well
10325e7823c9SIan Munsie 	 * to be on the safe side.
10335e7823c9SIan Munsie 	 *
10345e7823c9SIan Munsie 	 * Notably, we used to have some issues with the disable sequence
10355e7823c9SIan Munsie 	 * (before the sequence was spelled out in the architecture), which is
10365e7823c9SIan Munsie 	 * why we were so heavyweight in the first place. However, a bug was
10375e7823c9SIan Munsie 	 * later discovered that had rendered the disable operation ineffective,
10385e7823c9SIan Munsie 	 * so it is conceivable that was the sole explanation for those
10395e7823c9SIan Munsie 	 * difficulties. The point is: be careful and do some regression
10405e7823c9SIan Munsie 	 * testing if we ever attempt to remove any part of this procedure.
10415e7823c9SIan Munsie 	 */
10425be587b1SFrederic Barrat 	cxl_ops->afu_reset(ctx->afu);
1043f204e0b8SIan Munsie 	cxl_afu_disable(ctx->afu);
1044f204e0b8SIan Munsie 	cxl_psl_purge(ctx->afu);
1045f204e0b8SIan Munsie 	return 0;
1046f204e0b8SIan Munsie }
1047f204e0b8SIan Munsie 
native_update_ivtes(struct cxl_context * ctx)1048292841b0SIan Munsie static void native_update_ivtes(struct cxl_context *ctx)
1049292841b0SIan Munsie {
1050292841b0SIan Munsie 	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
1051292841b0SIan Munsie 		return update_ivtes_directed(ctx);
1052bdd2e715SChristophe Lombard 	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
1053bdd2e715SChristophe Lombard 	    (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
1054bdd2e715SChristophe Lombard 		return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
1055292841b0SIan Munsie 	WARN(1, "native_update_ivtes: Bad mode\n");
1056292841b0SIan Munsie }
1057292841b0SIan Munsie 
detach_process_native_afu_directed(struct cxl_context * ctx)1058f204e0b8SIan Munsie static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
1059f204e0b8SIan Munsie {
1060f204e0b8SIan Munsie 	if (!ctx->pe_inserted)
1061f204e0b8SIan Munsie 		return 0;
1062f204e0b8SIan Munsie 	if (terminate_process_element(ctx))
1063f204e0b8SIan Munsie 		return -1;
1064f204e0b8SIan Munsie 	if (remove_process_element(ctx))
1065f204e0b8SIan Munsie 		return -1;
1066f204e0b8SIan Munsie 
1067f204e0b8SIan Munsie 	return 0;
1068f204e0b8SIan Munsie }
1069f204e0b8SIan Munsie 
native_detach_process(struct cxl_context * ctx)10702b04cf31SFrederic Barrat static int native_detach_process(struct cxl_context *ctx)
1071f204e0b8SIan Munsie {
10729bcf28cdSIan Munsie 	trace_cxl_detach(ctx);
10739bcf28cdSIan Munsie 
1074f204e0b8SIan Munsie 	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
1075f204e0b8SIan Munsie 		return detach_process_native_dedicated(ctx);
1076f204e0b8SIan Munsie 
1077f204e0b8SIan Munsie 	return detach_process_native_afu_directed(ctx);
1078f204e0b8SIan Munsie }
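
/*
 * Both detach flavours are reached through cxl_ops->detach_process(); the
 * caller (assumed to be the core context teardown path) is responsible for
 * ensuring no further work is submitted against the PE afterwards.
 */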
1079f204e0b8SIan Munsie 
native_get_irq_info(struct cxl_afu * afu,struct cxl_irq_info * info)10802b04cf31SFrederic Barrat static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
1081f204e0b8SIan Munsie {
10820b3f9c75SDaniel Axtens 	/* If the adapter has gone away, we can't get any meaningful
10830b3f9c75SDaniel Axtens 	 * information.
10840b3f9c75SDaniel Axtens 	 */
10850d400f77SChristophe Lombard 	if (!cxl_ops->link_ok(afu->adapter, afu))
10860b3f9c75SDaniel Axtens 		return -EIO;
10870b3f9c75SDaniel Axtens 
1088bc78b05bSIan Munsie 	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
1089bc78b05bSIan Munsie 	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
1090abd1d99bSChristophe Lombard 	if (cxl_is_power8())
1091bc78b05bSIan Munsie 		info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
1092bc78b05bSIan Munsie 	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
1093bc78b05bSIan Munsie 	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
1094444c4ba4SChristophe Lombard 	info->proc_handle = 0;
1095f204e0b8SIan Munsie 
1096f204e0b8SIan Munsie 	return 0;
1097f204e0b8SIan Munsie }
1098f204e0b8SIan Munsie 
cxl_native_irq_dump_regs_psl9(struct cxl_context * ctx)1099f24be42aSChristophe Lombard void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
1100f24be42aSChristophe Lombard {
11018f6a9042SVaibhav Jain 	u64 fir1, serr;
1102f24be42aSChristophe Lombard 
1103f24be42aSChristophe Lombard 	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);
1104f24be42aSChristophe Lombard 
1105f24be42aSChristophe Lombard 	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
1106f24be42aSChristophe Lombard 	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
1107f24be42aSChristophe Lombard 		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
1108f24be42aSChristophe Lombard 		cxl_afu_decode_psl_serr(ctx->afu, serr);
1109f24be42aSChristophe Lombard 	}
1110f24be42aSChristophe Lombard }
1111f24be42aSChristophe Lombard 
cxl_native_irq_dump_regs_psl8(struct cxl_context * ctx)111264663f37SChristophe Lombard void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
1113d56d301bSFrederic Barrat {
1114d56d301bSFrederic Barrat 	u64 fir1, fir2, fir_slice, serr, afu_debug;
1115d56d301bSFrederic Barrat 
1116d56d301bSFrederic Barrat 	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
1117d56d301bSFrederic Barrat 	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
1118d56d301bSFrederic Barrat 	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
1119d56d301bSFrederic Barrat 	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);
1120d56d301bSFrederic Barrat 
1121d56d301bSFrederic Barrat 	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
1122d56d301bSFrederic Barrat 	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
11236d382616SFrederic Barrat 	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
11246d382616SFrederic Barrat 		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
11256e0c50f9SPhilippe Bergheaud 		cxl_afu_decode_psl_serr(ctx->afu, serr);
11266d382616SFrederic Barrat 	}
1127d56d301bSFrederic Barrat 	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
1128d56d301bSFrederic Barrat 	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
11296d382616SFrederic Barrat }
1130d56d301bSFrederic Barrat 
native_handle_psl_slice_error(struct cxl_context * ctx,u64 dsisr,u64 errstat)11316d382616SFrederic Barrat static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
11326d382616SFrederic Barrat 						u64 dsisr, u64 errstat)
11336d382616SFrederic Barrat {
11346d382616SFrederic Barrat 
11356d382616SFrederic Barrat 	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);
11366d382616SFrederic Barrat 
11376d382616SFrederic Barrat 	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
11386d382616SFrederic Barrat 		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);
11396d382616SFrederic Barrat 
11406d382616SFrederic Barrat 	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
1141d56d301bSFrederic Barrat 		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
11426d382616SFrederic Barrat 		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
11436d382616SFrederic Barrat 	}
1144d56d301bSFrederic Barrat 
11455be587b1SFrederic Barrat 	return cxl_ops->ack_irq(ctx, 0, errstat);
1146d56d301bSFrederic Barrat }
1147d56d301bSFrederic Barrat 
cxl_is_translation_fault(struct cxl_afu * afu,u64 dsisr)1148f24be42aSChristophe Lombard static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
1149f24be42aSChristophe Lombard {
1150797625deSChristophe Lombard 	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
1151f24be42aSChristophe Lombard 		return true;
1152f24be42aSChristophe Lombard 
1153797625deSChristophe Lombard 	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
1154f24be42aSChristophe Lombard 		return true;
1155f24be42aSChristophe Lombard 
1156f24be42aSChristophe Lombard 	return false;
1157f24be42aSChristophe Lombard }
1158f24be42aSChristophe Lombard 
cxl_fail_irq_psl(struct cxl_afu * afu,struct cxl_irq_info * irq_info)1159bdd2e715SChristophe Lombard irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
1160d56d301bSFrederic Barrat {
1161f24be42aSChristophe Lombard 	if (cxl_is_translation_fault(afu, irq_info->dsisr))
1162d56d301bSFrederic Barrat 		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
1163d56d301bSFrederic Barrat 	else
1164d56d301bSFrederic Barrat 		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
1165d56d301bSFrederic Barrat 
1166d56d301bSFrederic Barrat 	return IRQ_HANDLED;
1167d56d301bSFrederic Barrat }
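
/*
 * This is the fallback acknowledgement used when an interrupt cannot be
 * routed to a context (see native_irq_multiplexed() below): translation
 * faults are acked with an address error (CXL_PSL_TFC_An_AE), everything
 * else is simply acknowledged (CXL_PSL_TFC_An_A) so the PSL can make
 * progress. A sketch of how a service layer would wire it up, with
 * everything other than the .fail_irq member assumed:
 *
 *	static const struct cxl_service_layer_ops example_sl_ops = {
 *		...
 *		.fail_irq = cxl_fail_irq_psl,
 *	};
 */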
1168d56d301bSFrederic Barrat 
native_irq_multiplexed(int irq,void * data)11692b04cf31SFrederic Barrat static irqreturn_t native_irq_multiplexed(int irq, void *data)
1170d56d301bSFrederic Barrat {
1171d56d301bSFrederic Barrat 	struct cxl_afu *afu = data;
1172d56d301bSFrederic Barrat 	struct cxl_context *ctx;
1173d56d301bSFrederic Barrat 	struct cxl_irq_info irq_info;
1174abf051beSVaibhav Jain 	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
1175bdd2e715SChristophe Lombard 	int ph, ret = IRQ_HANDLED, res;
1176d56d301bSFrederic Barrat 
1177abf051beSVaibhav Jain 	/* check if eeh kicked in while the interrupt was in flight */
1178abf051beSVaibhav Jain 	if (unlikely(phreg == ~0ULL)) {
1179abf051beSVaibhav Jain 		dev_warn(&afu->dev,
1180abf051beSVaibhav Jain 			 "Ignoring slice interrupt(%d) due to fenced card\n",
1181abf051beSVaibhav Jain 			 irq);
1182abf051beSVaibhav Jain 		return IRQ_HANDLED;
1183abf051beSVaibhav Jain 	}
1184abf051beSVaibhav Jain 	/* Extract the PE handle from the register value */
1185abf051beSVaibhav Jain 	ph = phreg & 0xffff;
1186bdd2e715SChristophe Lombard 	if ((res = native_get_irq_info(afu, &irq_info))) {
1187bdd2e715SChristophe Lombard 		WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
1188bdd2e715SChristophe Lombard 		if (afu->adapter->native->sl_ops->fail_irq)
1189bdd2e715SChristophe Lombard 			return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
1190bdd2e715SChristophe Lombard 		return ret;
1191d56d301bSFrederic Barrat 	}
1192d56d301bSFrederic Barrat 
1193d56d301bSFrederic Barrat 	rcu_read_lock();
1194d56d301bSFrederic Barrat 	ctx = idr_find(&afu->contexts_idr, ph);
1195d56d301bSFrederic Barrat 	if (ctx) {
1196bdd2e715SChristophe Lombard 		if (afu->adapter->native->sl_ops->handle_interrupt)
1197bdd2e715SChristophe Lombard 			ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
1198d56d301bSFrederic Barrat 		rcu_read_unlock();
1199d56d301bSFrederic Barrat 		return ret;
1200d56d301bSFrederic Barrat 	}
1201d56d301bSFrederic Barrat 	rcu_read_unlock();
1202d56d301bSFrederic Barrat 
1203d56d301bSFrederic Barrat 	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
1204d56d301bSFrederic Barrat 		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
1205d56d301bSFrederic Barrat 		" with outstanding transactions?)\n", ph, irq_info.dsisr,
1206d56d301bSFrederic Barrat 		irq_info.dar);
1207bdd2e715SChristophe Lombard 	if (afu->adapter->native->sl_ops->fail_irq)
1208bdd2e715SChristophe Lombard 		ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
1209bdd2e715SChristophe Lombard 	return ret;
1210d56d301bSFrederic Barrat }
1211d56d301bSFrederic Barrat 
native_irq_wait(struct cxl_context * ctx)12126fd40f19SAndrew Donnellan static void native_irq_wait(struct cxl_context *ctx)
12132bc79ffcSMichael Neuling {
12142bc79ffcSMichael Neuling 	u64 dsisr;
12152bc79ffcSMichael Neuling 	int timeout = 1000;
12162bc79ffcSMichael Neuling 	int ph;
12172bc79ffcSMichael Neuling 
12182bc79ffcSMichael Neuling 	/*
12192bc79ffcSMichael Neuling 	 * Wait until no further interrupts are presented by the PSL
12202bc79ffcSMichael Neuling 	 * for this context.
12212bc79ffcSMichael Neuling 	 */
12222bc79ffcSMichael Neuling 	while (timeout--) {
12232bc79ffcSMichael Neuling 		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
12242bc79ffcSMichael Neuling 		if (ph != ctx->pe)
12252bc79ffcSMichael Neuling 			return;
12262bc79ffcSMichael Neuling 		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
1227797625deSChristophe Lombard 		if (cxl_is_power8() &&
1228abd1d99bSChristophe Lombard 		   ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
12292bc79ffcSMichael Neuling 			return;
1230797625deSChristophe Lombard 		if (cxl_is_power9() &&
1231f24be42aSChristophe Lombard 		   ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
1232f24be42aSChristophe Lombard 			return;
12332bc79ffcSMichael Neuling 		/*
12342bc79ffcSMichael Neuling 		 * We are waiting for the workqueue to process our
12352bc79ffcSMichael Neuling 		 * irq, so we need to let that run here.
12362bc79ffcSMichael Neuling 		 */
12372bc79ffcSMichael Neuling 		msleep(1);
12382bc79ffcSMichael Neuling 	}
12392bc79ffcSMichael Neuling 
12402bc79ffcSMichael Neuling 	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
12412bc79ffcSMichael Neuling 		 " DSISR %016llx!\n", ph, dsisr);
12422bc79ffcSMichael Neuling 	return;
12432bc79ffcSMichael Neuling }
12442bc79ffcSMichael Neuling 
native_slice_irq_err(int irq,void * data)12452b04cf31SFrederic Barrat static irqreturn_t native_slice_irq_err(int irq, void *data)
1246d56d301bSFrederic Barrat {
1247d56d301bSFrederic Barrat 	struct cxl_afu *afu = data;
1248abd1d99bSChristophe Lombard 	u64 errstat, serr, afu_error, dsisr;
1249a715626aSAlastair D'Silva 	u64 fir_slice, afu_debug, irq_mask;
1250d56d301bSFrederic Barrat 
12516d382616SFrederic Barrat 	/*
12526d382616SFrederic Barrat 	 * slice err interrupt is only used with full PSL (no XSL)
12536d382616SFrederic Barrat 	 */
1254d56d301bSFrederic Barrat 	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
1255d56d301bSFrederic Barrat 	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
12566e0c50f9SPhilippe Bergheaud 	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
12576e0c50f9SPhilippe Bergheaud 	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
12586e0c50f9SPhilippe Bergheaud 	cxl_afu_decode_psl_serr(afu, serr);
1259abd1d99bSChristophe Lombard 
1260abd1d99bSChristophe Lombard 	if (cxl_is_power8()) {
1261abd1d99bSChristophe Lombard 		fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
1262abd1d99bSChristophe Lombard 		afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
1263d56d301bSFrederic Barrat 		dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
1264d56d301bSFrederic Barrat 		dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
1265abd1d99bSChristophe Lombard 	}
1266abd1d99bSChristophe Lombard 	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
12676e0c50f9SPhilippe Bergheaud 	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
12686e0c50f9SPhilippe Bergheaud 	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);
1269d56d301bSFrederic Barrat 
1270a715626aSAlastair D'Silva 	/* mask off the IRQ so it won't retrigger until the AFU is reset */
1271a715626aSAlastair D'Silva 	irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
1272a715626aSAlastair D'Silva 	serr |= irq_mask;
1273d56d301bSFrederic Barrat 	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
1274a715626aSAlastair D'Silva 	dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");
1275d56d301bSFrederic Barrat 
1276d56d301bSFrederic Barrat 	return IRQ_HANDLED;
1277d56d301bSFrederic Barrat }
1278d56d301bSFrederic Barrat 
cxl_native_err_irq_dump_regs_psl9(struct cxl * adapter)1279990f19aeSVaibhav Jain void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
1280990f19aeSVaibhav Jain {
1281990f19aeSVaibhav Jain 	u64 fir1;
1282990f19aeSVaibhav Jain 
1283990f19aeSVaibhav Jain 	fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
1284990f19aeSVaibhav Jain 	dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
1285990f19aeSVaibhav Jain }
1286990f19aeSVaibhav Jain 
cxl_native_err_irq_dump_regs_psl8(struct cxl * adapter)1287990f19aeSVaibhav Jain void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
12886d382616SFrederic Barrat {
12896d382616SFrederic Barrat 	u64 fir1, fir2;
12906d382616SFrederic Barrat 
12916d382616SFrederic Barrat 	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
12926d382616SFrederic Barrat 	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
1293990f19aeSVaibhav Jain 	dev_crit(&adapter->dev,
1294990f19aeSVaibhav Jain 		 "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
1295990f19aeSVaibhav Jain 		 fir1, fir2);
12966d382616SFrederic Barrat }
12976d382616SFrederic Barrat 
native_irq_err(int irq,void * data)12982b04cf31SFrederic Barrat static irqreturn_t native_irq_err(int irq, void *data)
1299d56d301bSFrederic Barrat {
1300d56d301bSFrederic Barrat 	struct cxl *adapter = data;
13016d382616SFrederic Barrat 	u64 err_ivte;
1302d56d301bSFrederic Barrat 
1303d56d301bSFrederic Barrat 	WARN(1, "CXL ERROR interrupt %i\n", irq);
1304d56d301bSFrederic Barrat 
1305d56d301bSFrederic Barrat 	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
1306d56d301bSFrederic Barrat 	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);
1307d56d301bSFrederic Barrat 
13086d382616SFrederic Barrat 	if (adapter->native->sl_ops->debugfs_stop_trace) {
1309d56d301bSFrederic Barrat 		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
13106d382616SFrederic Barrat 		adapter->native->sl_ops->debugfs_stop_trace(adapter);
13116d382616SFrederic Barrat 	}
1312d56d301bSFrederic Barrat 
13136d382616SFrederic Barrat 	if (adapter->native->sl_ops->err_irq_dump_registers)
13146d382616SFrederic Barrat 		adapter->native->sl_ops->err_irq_dump_registers(adapter);
1315d56d301bSFrederic Barrat 
1316d56d301bSFrederic Barrat 	return IRQ_HANDLED;
1317d56d301bSFrederic Barrat }
1318d56d301bSFrederic Barrat 
cxl_native_register_psl_err_irq(struct cxl * adapter)13192b04cf31SFrederic Barrat int cxl_native_register_psl_err_irq(struct cxl *adapter)
1320d56d301bSFrederic Barrat {
1321d56d301bSFrederic Barrat 	int rc;
1322d56d301bSFrederic Barrat 
1323d56d301bSFrederic Barrat 	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
1324d56d301bSFrederic Barrat 				      dev_name(&adapter->dev));
1325d56d301bSFrederic Barrat 	if (!adapter->irq_name)
1326d56d301bSFrederic Barrat 		return -ENOMEM;
1327d56d301bSFrederic Barrat 
13282b04cf31SFrederic Barrat 	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
1329cbffa3a5SChristophe Lombard 				       &adapter->native->err_hwirq,
1330cbffa3a5SChristophe Lombard 				       &adapter->native->err_virq,
1331d56d301bSFrederic Barrat 				       adapter->irq_name))) {
1332d56d301bSFrederic Barrat 		kfree(adapter->irq_name);
1333d56d301bSFrederic Barrat 		adapter->irq_name = NULL;
1334d56d301bSFrederic Barrat 		return rc;
1335d56d301bSFrederic Barrat 	}
1336d56d301bSFrederic Barrat 
1337cbffa3a5SChristophe Lombard 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);
1338d56d301bSFrederic Barrat 
1339d56d301bSFrederic Barrat 	return 0;
1340d56d301bSFrederic Barrat }
1341d56d301bSFrederic Barrat 
cxl_native_release_psl_err_irq(struct cxl * adapter)13422b04cf31SFrederic Barrat void cxl_native_release_psl_err_irq(struct cxl *adapter)
1343d56d301bSFrederic Barrat {
1344b3aa20baSVaibhav Jain 	if (adapter->native->err_virq == 0 ||
1345b3aa20baSVaibhav Jain 	    adapter->native->err_virq !=
1346b3aa20baSVaibhav Jain 	    irq_find_mapping(NULL, adapter->native->err_hwirq))
1347d56d301bSFrederic Barrat 		return;
1348d56d301bSFrederic Barrat 
1349d56d301bSFrederic Barrat 	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
1350cbffa3a5SChristophe Lombard 	cxl_unmap_irq(adapter->native->err_virq, adapter);
1351cbffa3a5SChristophe Lombard 	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
1352d56d301bSFrederic Barrat 	kfree(adapter->irq_name);
1353b3aa20baSVaibhav Jain 	adapter->native->err_virq = 0;
1354d56d301bSFrederic Barrat }
1355d56d301bSFrederic Barrat 
cxl_native_register_serr_irq(struct cxl_afu * afu)13562b04cf31SFrederic Barrat int cxl_native_register_serr_irq(struct cxl_afu *afu)
1357d56d301bSFrederic Barrat {
1358d56d301bSFrederic Barrat 	u64 serr;
1359d56d301bSFrederic Barrat 	int rc;
1360d56d301bSFrederic Barrat 
1361d56d301bSFrederic Barrat 	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
1362d56d301bSFrederic Barrat 				      dev_name(&afu->dev));
1363d56d301bSFrederic Barrat 	if (!afu->err_irq_name)
1364d56d301bSFrederic Barrat 		return -ENOMEM;
1365d56d301bSFrederic Barrat 
13662b04cf31SFrederic Barrat 	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
1367d56d301bSFrederic Barrat 				       &afu->serr_hwirq,
1368d56d301bSFrederic Barrat 				       &afu->serr_virq, afu->err_irq_name))) {
1369d56d301bSFrederic Barrat 		kfree(afu->err_irq_name);
1370d56d301bSFrederic Barrat 		afu->err_irq_name = NULL;
1371d56d301bSFrederic Barrat 		return rc;
1372d56d301bSFrederic Barrat 	}
1373d56d301bSFrederic Barrat 
1374d56d301bSFrederic Barrat 	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
1375abd1d99bSChristophe Lombard 	if (cxl_is_power8())
1376d56d301bSFrederic Barrat 		serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
1377f24be42aSChristophe Lombard 	if (cxl_is_power9()) {
1378f24be42aSChristophe Lombard 		/*
1379f24be42aSChristophe Lombard 		 * By default, all errors are masked, so don't set all the masks.
1380f24be42aSChristophe Lombard 		 * Slice errors will still be transferred.
1381f24be42aSChristophe Lombard 		 */
1382f24be42aSChristophe Lombard 		serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
1383f24be42aSChristophe Lombard 	}
1384d56d301bSFrederic Barrat 	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
1385d56d301bSFrederic Barrat 
1386d56d301bSFrederic Barrat 	return 0;
1387d56d301bSFrederic Barrat }
1388d56d301bSFrederic Barrat 
cxl_native_release_serr_irq(struct cxl_afu * afu)13892b04cf31SFrederic Barrat void cxl_native_release_serr_irq(struct cxl_afu *afu)
1390d56d301bSFrederic Barrat {
1391b3aa20baSVaibhav Jain 	if (afu->serr_virq == 0 ||
1392b3aa20baSVaibhav Jain 	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
1393d56d301bSFrederic Barrat 		return;
1394d56d301bSFrederic Barrat 
1395d56d301bSFrederic Barrat 	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
1396d56d301bSFrederic Barrat 	cxl_unmap_irq(afu->serr_virq, afu);
13975be587b1SFrederic Barrat 	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
1398d56d301bSFrederic Barrat 	kfree(afu->err_irq_name);
1399b3aa20baSVaibhav Jain 	afu->serr_virq = 0;
1400d56d301bSFrederic Barrat }
1401d56d301bSFrederic Barrat 
cxl_native_register_psl_irq(struct cxl_afu * afu)14022b04cf31SFrederic Barrat int cxl_native_register_psl_irq(struct cxl_afu *afu)
1403d56d301bSFrederic Barrat {
1404d56d301bSFrederic Barrat 	int rc;
1405d56d301bSFrederic Barrat 
1406d56d301bSFrederic Barrat 	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
1407d56d301bSFrederic Barrat 				      dev_name(&afu->dev));
1408d56d301bSFrederic Barrat 	if (!afu->psl_irq_name)
1409d56d301bSFrederic Barrat 		return -ENOMEM;
1410d56d301bSFrederic Barrat 
1411cbffa3a5SChristophe Lombard 	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
1412cbffa3a5SChristophe Lombard 				    afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
1413d56d301bSFrederic Barrat 				    afu->psl_irq_name))) {
1414d56d301bSFrederic Barrat 		kfree(afu->psl_irq_name);
1415d56d301bSFrederic Barrat 		afu->psl_irq_name = NULL;
1416d56d301bSFrederic Barrat 	}
1417d56d301bSFrederic Barrat 	return rc;
1418d56d301bSFrederic Barrat }
1419d56d301bSFrederic Barrat 
cxl_native_release_psl_irq(struct cxl_afu * afu)14202b04cf31SFrederic Barrat void cxl_native_release_psl_irq(struct cxl_afu *afu)
1421d56d301bSFrederic Barrat {
1422b3aa20baSVaibhav Jain 	if (afu->native->psl_virq == 0 ||
1423b3aa20baSVaibhav Jain 	    afu->native->psl_virq !=
1424b3aa20baSVaibhav Jain 	    irq_find_mapping(NULL, afu->native->psl_hwirq))
1425d56d301bSFrederic Barrat 		return;
1426d56d301bSFrederic Barrat 
1427cbffa3a5SChristophe Lombard 	cxl_unmap_irq(afu->native->psl_virq, afu);
1428cbffa3a5SChristophe Lombard 	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
1429d56d301bSFrederic Barrat 	kfree(afu->psl_irq_name);
1430b3aa20baSVaibhav Jain 	afu->native->psl_virq = 0;
1431d56d301bSFrederic Barrat }
1432d56d301bSFrederic Barrat 
recover_psl_err(struct cxl_afu * afu,u64 errstat)1433f204e0b8SIan Munsie static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
1434f204e0b8SIan Munsie {
1435f204e0b8SIan Munsie 	u64 dsisr;
1436f204e0b8SIan Munsie 
1437de369538SRasmus Villemoes 	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);
1438f204e0b8SIan Munsie 
1439f204e0b8SIan Munsie 	/* Clear PSL_DSISR[PE] */
1440f204e0b8SIan Munsie 	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
1441f204e0b8SIan Munsie 	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);
1442f204e0b8SIan Munsie 
1443f204e0b8SIan Munsie 	/* Write 1s to clear error status bits */
1444f204e0b8SIan Munsie 	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
1445f204e0b8SIan Munsie }
1446f204e0b8SIan Munsie 
native_ack_irq(struct cxl_context * ctx,u64 tfc,u64 psl_reset_mask)14472b04cf31SFrederic Barrat static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
1448f204e0b8SIan Munsie {
14499bcf28cdSIan Munsie 	trace_cxl_psl_irq_ack(ctx, tfc);
1450f204e0b8SIan Munsie 	if (tfc)
1451f204e0b8SIan Munsie 		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
1452f204e0b8SIan Munsie 	if (psl_reset_mask)
1453f204e0b8SIan Munsie 		recover_psl_err(ctx->afu, psl_reset_mask);
1454f204e0b8SIan Munsie 
1455f204e0b8SIan Munsie 	return 0;
1456f204e0b8SIan Munsie }
1457f204e0b8SIan Munsie 
cxl_check_error(struct cxl_afu * afu)1458f204e0b8SIan Munsie int cxl_check_error(struct cxl_afu *afu)
1459f204e0b8SIan Munsie {
1460f204e0b8SIan Munsie 	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
1461f204e0b8SIan Munsie }
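
/*
 * PSL_SCNTL_An reading back as all ones is what MMIO returns once the card
 * has been fenced or unplugged (compare the phreg == ~0ULL check in
 * native_irq_multiplexed() above), so this gives callers a cheap "is the
 * hardware still there" probe. A minimal usage sketch, with the surrounding
 * error handling assumed:
 *
 *	if (cxl_check_error(afu))
 *		dev_err(&afu->dev, "AFU registers read as ~0, card is gone\n");
 */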
1462d56d301bSFrederic Barrat 
native_support_attributes(const char * attr_name,enum cxl_attrs type)14634752876cSChristophe Lombard static bool native_support_attributes(const char *attr_name,
14644752876cSChristophe Lombard 				      enum cxl_attrs type)
14654752876cSChristophe Lombard {
14664752876cSChristophe Lombard 	return true;
14674752876cSChristophe Lombard }
14684752876cSChristophe Lombard 
native_afu_cr_read64(struct cxl_afu * afu,int cr,u64 off,u64 * out)14692b04cf31SFrederic Barrat static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
1470d56d301bSFrederic Barrat {
14710d400f77SChristophe Lombard 	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
14725be587b1SFrederic Barrat 		return -EIO;
14735be587b1SFrederic Barrat 	if (unlikely(off >= afu->crs_len))
14745be587b1SFrederic Barrat 		return -ERANGE;
1475cbffa3a5SChristophe Lombard 	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
14765be587b1SFrederic Barrat 		(cr * afu->crs_len) + off);
14775be587b1SFrederic Barrat 	return 0;
1478d56d301bSFrederic Barrat }
1479d56d301bSFrederic Barrat 
native_afu_cr_read32(struct cxl_afu * afu,int cr,u64 off,u32 * out)14802b04cf31SFrederic Barrat static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
1481d56d301bSFrederic Barrat {
14820d400f77SChristophe Lombard 	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
14835be587b1SFrederic Barrat 		return -EIO;
14845be587b1SFrederic Barrat 	if (unlikely(off >= afu->crs_len))
14855be587b1SFrederic Barrat 		return -ERANGE;
1486cbffa3a5SChristophe Lombard 	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
14875be587b1SFrederic Barrat 		(cr * afu->crs_len) + off);
14885be587b1SFrederic Barrat 	return 0;
1489d56d301bSFrederic Barrat }
1490d56d301bSFrederic Barrat 
native_afu_cr_read16(struct cxl_afu * afu,int cr,u64 off,u16 * out)14912b04cf31SFrederic Barrat static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
1492d56d301bSFrederic Barrat {
1493d56d301bSFrederic Barrat 	u64 aligned_off = off & ~0x3L;
1494d56d301bSFrederic Barrat 	u32 val;
14955be587b1SFrederic Barrat 	int rc;
1496d56d301bSFrederic Barrat 
14972b04cf31SFrederic Barrat 	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
14985be587b1SFrederic Barrat 	if (!rc)
14995be587b1SFrederic Barrat 		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
15005be587b1SFrederic Barrat 	return rc;
1501d56d301bSFrederic Barrat }
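
/*
 * Worked example of the arithmetic above: a 16-bit read at off = 6 becomes
 * a 32-bit read at aligned_off = 4; the requested halfword then sits in
 * bits 16..31 of val (the records are little-endian), so the shift is
 * (6 & 0x3) * 8 = 16 and the result is (val >> 16) & 0xffff.
 */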
1502d56d301bSFrederic Barrat 
native_afu_cr_read8(struct cxl_afu * afu,int cr,u64 off,u8 * out)15032b04cf31SFrederic Barrat static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
1504d56d301bSFrederic Barrat {
1505d56d301bSFrederic Barrat 	u64 aligned_off = off & ~0x3L;
1506d56d301bSFrederic Barrat 	u32 val;
15075be587b1SFrederic Barrat 	int rc;
1508d56d301bSFrederic Barrat 
15092b04cf31SFrederic Barrat 	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
15105be587b1SFrederic Barrat 	if (!rc)
15115be587b1SFrederic Barrat 		*out = (val >> ((off & 0x3) * 8)) & 0xff;
15125be587b1SFrederic Barrat 	return rc;
1513d56d301bSFrederic Barrat }
15145be587b1SFrederic Barrat 
native_afu_cr_write32(struct cxl_afu * afu,int cr,u64 off,u32 in)1515d601ea91SFrederic Barrat static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
1516d601ea91SFrederic Barrat {
15170d400f77SChristophe Lombard 	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
1518d601ea91SFrederic Barrat 		return -EIO;
1519d601ea91SFrederic Barrat 	if (unlikely(off >= afu->crs_len))
1520d601ea91SFrederic Barrat 		return -ERANGE;
1521d601ea91SFrederic Barrat 	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
1522d601ea91SFrederic Barrat 		(cr * afu->crs_len) + off, in);
1523d601ea91SFrederic Barrat 	return 0;
1524d601ea91SFrederic Barrat }
1525d601ea91SFrederic Barrat 
native_afu_cr_write16(struct cxl_afu * afu,int cr,u64 off,u16 in)1526d601ea91SFrederic Barrat static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
1527d601ea91SFrederic Barrat {
1528d601ea91SFrederic Barrat 	u64 aligned_off = off & ~0x3L;
1529d601ea91SFrederic Barrat 	u32 val32, mask, shift;
1530d601ea91SFrederic Barrat 	int rc;
1531d601ea91SFrederic Barrat 
1532d601ea91SFrederic Barrat 	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
1533d601ea91SFrederic Barrat 	if (rc)
1534d601ea91SFrederic Barrat 		return rc;
1535d601ea91SFrederic Barrat 	shift = (off & 0x3) * 8;
1536d601ea91SFrederic Barrat 	WARN_ON(shift == 24);
1537d601ea91SFrederic Barrat 	mask = 0xffff << shift;
1538d601ea91SFrederic Barrat 	val32 = (val32 & ~mask) | (in << shift);
1539d601ea91SFrederic Barrat 
1540d601ea91SFrederic Barrat 	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
1541d601ea91SFrederic Barrat 	return rc;
1542d601ea91SFrederic Barrat }
1543d601ea91SFrederic Barrat 
native_afu_cr_write8(struct cxl_afu * afu,int cr,u64 off,u8 in)1544d601ea91SFrederic Barrat static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
1545d601ea91SFrederic Barrat {
1546d601ea91SFrederic Barrat 	u64 aligned_off = off & ~0x3L;
1547d601ea91SFrederic Barrat 	u32 val32, mask, shift;
1548d601ea91SFrederic Barrat 	int rc;
1549d601ea91SFrederic Barrat 
1550d601ea91SFrederic Barrat 	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
1551d601ea91SFrederic Barrat 	if (rc)
1552d601ea91SFrederic Barrat 		return rc;
1553d601ea91SFrederic Barrat 	shift = (off & 0x3) * 8;
1554d601ea91SFrederic Barrat 	mask = 0xff << shift;
1555d601ea91SFrederic Barrat 	val32 = (val32 & ~mask) | (in << shift);
1556d601ea91SFrederic Barrat 
1557d601ea91SFrederic Barrat 	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
1558d601ea91SFrederic Barrat 	return rc;
1559d601ea91SFrederic Barrat }
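
/*
 * The write helpers mirror the read helpers with a read-modify-write.
 * Worked example: writing a byte at off = 5 reads the 32-bit word at
 * aligned_off = 4, computes shift = (5 & 0x3) * 8 = 8, clears bits 8..15
 * with mask = 0xff << 8, ORs in the new byte shifted left by 8 and writes
 * the word back with native_afu_cr_write32().
 */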
1560d601ea91SFrederic Barrat 
15615be587b1SFrederic Barrat const struct cxl_backend_ops cxl_native_ops = {
15625be587b1SFrederic Barrat 	.module = THIS_MODULE,
15632b04cf31SFrederic Barrat 	.adapter_reset = cxl_pci_reset,
15642b04cf31SFrederic Barrat 	.alloc_one_irq = cxl_pci_alloc_one_irq,
15652b04cf31SFrederic Barrat 	.release_one_irq = cxl_pci_release_one_irq,
15662b04cf31SFrederic Barrat 	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
15672b04cf31SFrederic Barrat 	.release_irq_ranges = cxl_pci_release_irq_ranges,
15682b04cf31SFrederic Barrat 	.setup_irq = cxl_pci_setup_irq,
15692b04cf31SFrederic Barrat 	.handle_psl_slice_error = native_handle_psl_slice_error,
15705be587b1SFrederic Barrat 	.psl_interrupt = NULL,
15712b04cf31SFrederic Barrat 	.ack_irq = native_ack_irq,
15722bc79ffcSMichael Neuling 	.irq_wait = native_irq_wait,
15732b04cf31SFrederic Barrat 	.attach_process = native_attach_process,
15742b04cf31SFrederic Barrat 	.detach_process = native_detach_process,
1575292841b0SIan Munsie 	.update_ivtes = native_update_ivtes,
15764752876cSChristophe Lombard 	.support_attributes = native_support_attributes,
15775be587b1SFrederic Barrat 	.link_ok = cxl_adapter_link_ok,
15782b04cf31SFrederic Barrat 	.release_afu = cxl_pci_release_afu,
15792b04cf31SFrederic Barrat 	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
15802b04cf31SFrederic Barrat 	.afu_check_and_enable = native_afu_check_and_enable,
15812b04cf31SFrederic Barrat 	.afu_activate_mode = native_afu_activate_mode,
15822b04cf31SFrederic Barrat 	.afu_deactivate_mode = native_afu_deactivate_mode,
15832b04cf31SFrederic Barrat 	.afu_reset = native_afu_reset,
15842b04cf31SFrederic Barrat 	.afu_cr_read8 = native_afu_cr_read8,
15852b04cf31SFrederic Barrat 	.afu_cr_read16 = native_afu_cr_read16,
15862b04cf31SFrederic Barrat 	.afu_cr_read32 = native_afu_cr_read32,
15872b04cf31SFrederic Barrat 	.afu_cr_read64 = native_afu_cr_read64,
1588d601ea91SFrederic Barrat 	.afu_cr_write8 = native_afu_cr_write8,
1589d601ea91SFrederic Barrat 	.afu_cr_write16 = native_afu_cr_write16,
1590d601ea91SFrederic Barrat 	.afu_cr_write32 = native_afu_cr_write32,
1591d601ea91SFrederic Barrat 	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
15925be587b1SFrederic Barrat };
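
/*
 * The rest of the driver only reaches this backend through the cxl_ops
 * pointer (as the cxl_ops->link_ok() and cxl_ops->ack_irq() calls above do),
 * so selecting it is a single assignment at module init. A sketch of that
 * selection, with the exact location (main.c) and surrounding error handling
 * assumed:
 *
 *	if (cpu_has_feature(CPU_FTR_HVMODE))
 *		cxl_ops = &cxl_native_ops;
 *	else
 *		cxl_ops = &cxl_guest_ops;
 */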
1593