// SPDX-License-Identifier: GPL-2.0
/*
 * s390 specific pci instructions
 *
 * Copyright IBM Corp. 2013
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>

#define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */

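/*
 * Dump the request, offset, condition code and status of a failed
 * instruction to the zPCI debug buffer via zpci_err_hex().
 */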
static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
{
	struct {
		u64 req;
		u64 offset;
		u8 cc;
		u8 status;
	} __packed data = {req, offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	asm volatile (
		"	.insn	rxy,0xe300000000d0,%[req],%[fib]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
		: : "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

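/*
 * Issue the Modify PCI Function Controls instruction for the given
 * request and function information block, retrying with a short sleep
 * while it reports busy (cc 2).  Returns the final condition code and
 * stores the instruction status in *status.
 */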
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	do {
		cc = __mpcifc(req, fib, status);
		if (cc == 2)
			msleep(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, *status, req, 0);

	return cc;
}

/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	union register_pair addr_range = {.even = addr, .odd = range};
	u8 cc;

	asm volatile (
		"	.insn	rre,0xb9d30000,%[fn],%[addr_range]\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc), [fn] "+d" (fn)
		: [addr_range] "d" (addr_range.pair)
		: "cc");
	*status = fn >> 24 & 0xff;
	return cc;
}

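/*
 * Refresh the DMA translations for the given address range, retrying
 * while the instruction is busy (cc 2).  A cc of 1 with status 4 or 16
 * is mapped to -ENOMEM, any other failure to -EIO.
 */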
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
	u8 cc, status;

	do {
		cc = __rpcit(fn, addr, range, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, addr, range);

	if (cc == 1 && (status == 4 || status == 16))
		return -ENOMEM;

	return (cc) ? -EIO : 0;
}

/*
 * Set Interruption Controls: only available if facility 72 is
 * installed, otherwise -EIO is returned.
 */
int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	if (!test_facility(72))
		return -EIO;

	asm volatile(
		".insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

	return 0;
}

/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;	/* returned unchanged if the instruction faults */
	u64 __data;

	asm volatile (
		"	.insn	rre,0xb9d20000,%[data],%[req_off]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [req_off] "+&d" (req_off.pair) :: "cc");
	*status = req_off.even >> 24 & 0xff;
	*data = __data;
	return cc;
}

static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	u64 __data;
	int cc;

	cc = ____pcilg(&__data, req, offset, status);
	if (!cc)
		*data = __data;

	return cc;
}

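/*
 * PCI Load using a request (function handle, BAR, length) and offset,
 * retried while the instruction is busy (cc 2).  Returns 0 on success,
 * -EIO on failure and -ENXIO if the instruction faulted.
 */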
int __zpci_load(u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcilg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);

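/*
 * Non-MIO fallback: build the request from the function handle and BAR
 * stored in the iomap entry for addr and use the offset-based PCI Load.
 */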
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}

static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		"       .insn   rre,0xb9d60000,%[data],%[ioaddr_len]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
	*status = ioaddr_len.odd >> 24 & 0xff;
	*data = __data;
	return cc;
}

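/*
 * PCI Load from a mapped I/O address.  With MIO support the address is
 * used directly by the MIO form of PCI Load; otherwise the access goes
 * through the function handle based zpci_load_fh() path.
 */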
int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_load_fh(data, addr, len);

	cc = __pcilg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);

/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rre,0xb9d00000,%[data],%[req_off]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
		: [data] "d" (data)
		: "cc");
	*status = req_off.even >> 24 & 0xff;
	return cc;
}

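/*
 * PCI Store using a request (function handle, BAR, length) and offset,
 * retried while the instruction is busy (cc 2).  Returns 0 on success,
 * -EIO on failure and -ENXIO if the instruction faulted.
 */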
int __zpci_store(u64 data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);

static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}

static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rre,0xb9d40000,%[data],%[ioaddr_len]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		: [data] "d" (data)
		: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

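/*
 * PCI Store to a mapped I/O address.  With MIO support the address is
 * used directly by the MIO form of PCI Store; otherwise the access goes
 * through the function handle based zpci_store_fh() path.
 */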
int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_store_fh(addr, data, len);

	cc = __pcistg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);

/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"	.insn	rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (req)
		: [offset] "d" (offset), [data] "Q" (*data)
		: "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

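/*
 * PCI Store Block: write a block of data described by the request to
 * the given offset, retried while the instruction is busy (cc 2).
 * Returns 0 on success, -EIO on failure and -ENXIO if the instruction
 * faulted.
 */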
int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistb(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);

static inline int zpci_write_block_fh(volatile void __iomem *dst,
				      const void *src, unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
	u64 offset = ZPCI_OFFSET(dst);

	return __zpci_store_block(src, req, offset);
}

static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"       .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: "cc");
	*status = len >> 24 & 0xff;
	return cc;
}

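/*
 * Write a block of len bytes from src to the mapped I/O address dst.
 * With MIO support the MIO form of PCI Store Block is used directly;
 * otherwise the access goes through zpci_write_block_fh().
 */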
int zpci_write_block(volatile void __iomem *dst,
		     const void *src, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_write_block_fh(dst, src, len);

	cc = __pcistb_mio(src, (__force u64) dst, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) dst);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);

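/*
 * Issue the MIO PCI write barrier instruction (opcode 0xb9d5); the
 * operand register is not used and passed as a dummy.
 */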
static inline void __pciwb_mio(void)
{
	unsigned long unused = 0;

	asm volatile (".insn    rre,0xb9d50000,%[op],%[op]\n"
		      : [op] "+d" (unused));
}

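/*
 * Barrier for outstanding PCI stores: a no-op unless the MIO
 * instructions are in use.
 */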
void zpci_barrier(void)
{
	if (static_branch_likely(&have_mio))
		__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);