xref: /openbmc/linux/arch/s390/pci/pci_insn.c (revision bacf743e)
// SPDX-License-Identifier: GPL-2.0
/*
 * s390 specific pci instructions
 *
 * Copyright IBM Corp. 2013
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>

#define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */

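/* Collect the failing instruction's cc, status, request and offset for the zPCI debug trace */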
static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
{
	struct {
		u64 req;
		u64 offset;
		u8 cc;
		u8 status;
	} __packed data = {req, offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	asm volatile (
		"	.insn	rxy,0xe300000000d0,%[req],%[fib]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
		: : "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

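/* Issue MPCIFC, retrying as long as the function reports busy (cc 2) */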
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	do {
		cc = __mpcifc(req, fib, status);
		if (cc == 2)
			msleep(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, *status, req, 0);

	return cc;
}

/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	union register_pair addr_range = {.even = addr, .odd = range};
	u8 cc;

	asm volatile (
		"	.insn	rre,0xb9d30000,%[fn],%[addr_range]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [fn] "+d" (fn)
		: [addr_range] "d" (addr_range.pair)
		: "cc");
	*status = fn >> 24 & 0xff;
	return cc;
}

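/* Issue RPCIT for an address range, retrying while busy (cc 2) */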
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
	u8 cc, status;

	do {
		cc = __rpcit(fn, addr, range, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, addr, range);

	if (cc == 1 && (status == 4 || status == 16))
		return -ENOMEM;

	return (cc) ? -EIO : 0;
}

/* Set Interruption Controls */
int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	if (!test_facility(72))
		return -EIO;

	asm volatile(
		".insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

	return 0;
}

/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		"	.insn	rre,0xb9d20000,%[data],%[req_off]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [req_off] "+&d" (req_off.pair) :: "cc");
	*status = req_off.even >> 24 & 0xff;
	*data = __data;
	return cc;
}

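/* PCI Load wrapper that updates *data only when the instruction completed with cc 0 */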
static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	u64 __data;
	int cc;

	cc = ____pcilg(&__data, req, offset, status);
	if (!cc)
		*data = __data;

	return cc;
}

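/* PCI Load via a function handle encoded in req, retrying while busy (cc 2) */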
int __zpci_load(u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcilg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);

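/* Function handle based load: build the request from the iomap entry for addr */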
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}

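/* PCI Load from a mapped MIO address */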
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		"       .insn   rre,0xb9d60000,%[data],%[ioaddr_len]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
	*status = ioaddr_len.odd >> 24 & 0xff;
	*data = __data;
	return cc;
}

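/* MMIO read: use the MIO form of PCI Load if available, otherwise the function handle based path */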
int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_load_fh(data, addr, len);

	cc = __pcilg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);

/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rre,0xb9d00000,%[data],%[req_off]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
		: [data] "d" (data)
		: "cc");
	*status = req_off.even >> 24 & 0xff;
	return cc;
}

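/* PCI Store via a function handle encoded in req, retrying while busy (cc 2) */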
int __zpci_store(u64 data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);

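/* Function handle based store: build the request from the iomap entry for addr */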
static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}

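/* PCI Store to a mapped MIO address */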
static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rre,0xb9d40000,%[data],%[ioaddr_len]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		: [data] "d" (data)
		: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

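/* MMIO write: use the MIO form of PCI Store if available, otherwise the function handle based path */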
int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_store_fh(addr, data, len);

	cc = __pcistg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);

/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (req)
		: [offset] "d" (offset), [data] "Q" (*data)
		: "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

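/* PCI Store Block via a function handle encoded in req, retrying while busy (cc 2) */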
int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistb(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);

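/* Function handle based block write: build the request from the iomap entry for dst */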
static inline int zpci_write_block_fh(volatile void __iomem *dst,
				      const void *src, unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
	u64 offset = ZPCI_OFFSET(dst);

	return __zpci_store_block(src, req, offset);
}

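/* PCI Store Block to a mapped MIO address */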
static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: "cc");
	*status = len >> 24 & 0xff;
	return cc;
}

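/* Block MMIO write: use the MIO form of PCI Store Block if available, otherwise the function handle based path */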
int zpci_write_block(volatile void __iomem *dst,
		     const void *src, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_write_block_fh(dst, src, len);

	cc = __pcistb_mio(src, (__force u64) dst, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) dst);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);

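/* PCI write barrier for outstanding MIO stores */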
static inline void __pciwb_mio(void)
{
	asm volatile (".insn    rre,0xb9d50000,0,0\n");
}

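/* Issue a write barrier for MIO stores; a no-op when MIO is not in use */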
void zpci_barrier(void)
{
	if (static_branch_likely(&have_mio))
		__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);