// SPDX-License-Identifier: GPL-2.0
/*
 * s390 specific pci instructions
 *
 * Copyright IBM Corp. 2013
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>

#define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */

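/*
 * Error record written to the zPCI debug trace via zpci_err_hex_level().
 * Depending on the instruction it carries either a request/offset pair
 * (function-handle based forms) or an address/length pair (MIO forms),
 * matching the operands of the instruction that was logged.
 */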
struct zpci_err_insn_data {
	u8 insn;
	u8 cc;
	u8 status;
	union {
		struct {
			u64 req;
			u64 offset;
		};
		struct {
			u64 addr;
			u64 len;
		};
	};
} __packed;

static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status,
				     u64 req, u64 offset)
{
	struct zpci_err_insn_data data = {
		.insn = insn, .cc = cc, .status = status,
		.req = req, .offset = offset};

	zpci_err_hex_level(lvl, &data, sizeof(data));
}

static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status,
				      u64 addr, u64 len)
{
	struct zpci_err_insn_data data = {
		.insn = insn, .cc = cc, .status = status,
		.addr = addr, .len = len};

	zpci_err_hex_level(lvl, &data, sizeof(data));
}

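/*
 * Each wrapper below issues its PCI instruction, extracts the condition
 * code with ipm/srl and returns the status byte the instruction stores
 * back in its first operand ((operand >> 24) & 0xff).  A condition code
 * of 2 means the function is busy: the callers retry with a small delay
 * and log the first retry at debug level 1, while real failures are
 * logged at level 0.
 */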
/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	asm volatile (
		" .insn rxy,0xe300000000d0,%[req],%[fib]\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
		: : "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

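/*
 * The busy back-off here uses msleep() (so the actual delay is at least
 * a millisecond even though ZPCI_INSN_BUSY_DELAY is specified in
 * microseconds), which means zpci_mod_fc() must only be called from a
 * context that is allowed to sleep.  Unlike the load/store wrappers,
 * the condition code is returned to the caller unchanged along with
 * the status byte.
 */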
u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
	bool retried = false;
	u8 cc;

	do {
		cc = __mpcifc(req, fib, status);
		if (cc == 2) {
			msleep(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'M', cc, *status, req, 0);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'M', cc, *status, req, 0);
	else if (retried)
		zpci_err_insn_req(1, 'M', cc, *status, req, 0);

	return cc;
}
EXPORT_SYMBOL_GPL(zpci_mod_fc);

/* Refresh PCI Translations */
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	union register_pair addr_range = {.even = addr, .odd = range};
	u8 cc;

	asm volatile (
		" .insn rre,0xb9d30000,%[fn],%[addr_range]\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc), [fn] "+d" (fn)
		: [addr_range] "d" (addr_range.pair)
		: "cc");
	*status = fn >> 24 & 0xff;
	return cc;
}

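/*
 * Refreshes the PCI translations for the given function after the I/O
 * translation tables have changed.  A condition code of 1 with status 4
 * or 16 is treated as a resource shortage and mapped to -ENOMEM; any
 * other failure is reported as -EIO.
 */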
int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
	bool retried = false;
	u8 cc, status;

	do {
		cc = __rpcit(fn, addr, range, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_addr(1, 'R', cc, status, addr, range);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_addr(0, 'R', cc, status, addr, range);
	else if (retried)
		zpci_err_insn_addr(1, 'R', cc, status, addr, range);

	if (cc == 1 && (status == 4 || status == 16))
		return -ENOMEM;

	return (cc) ? -EIO : 0;
}

/* Set Interruption Controls */
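/*
 * The SIC instruction is only available when facility 72 is installed;
 * without it -EIO is returned.  The interruption subclass is shifted
 * into the position expected by the instruction and the interruption
 * information block is passed by reference.
 */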
int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	if (!test_facility(72))
		return -EIO;

	asm volatile(
		".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

	return 0;
}
EXPORT_SYMBOL_GPL(zpci_set_irq_ctrl);

/* PCI Load */
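/*
 * cc is pre-initialized to -ENXIO: if the load faults, the exception
 * table fixup skips the condition-code extraction and the preset
 * -ENXIO is returned instead of a condition code.  __pcilg() only
 * stores to *data when the load actually succeeded.
 */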
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		" .insn rre,0xb9d20000,%[data],%[req_off]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [req_off] "+&d" (req_off.pair) :: "cc");
	*status = req_off.even >> 24 & 0xff;
	*data = __data;
	return cc;
}

static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	u64 __data;
	int cc;

	cc = ____pcilg(&__data, req, offset, status);
	if (!cc)
		*data = __data;

	return cc;
}

int __zpci_load(u64 *data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcilg(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'l', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'l', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 'l', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);

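/*
 * Non-MIO path: on s390 the __iomem address is a token that encodes an
 * index into the zpci_iomap table plus an offset.  The table entry
 * supplies the function handle and BAR, which together with the access
 * length are packed into a request for the function-handle based load.
 */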
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}

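/*
 * MIO path: when the MIO forms of the PCI load/store instructions are
 * available (have_mio static key), the mapped I/O address is passed to
 * the instruction directly, so no iomap table lookup or request
 * encoding is needed.
 */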
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		" .insn rre,0xb9d60000,%[data],%[ioaddr_len]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data),
		  [ioaddr_len] "+&d" (ioaddr_len.pair) :: "cc");
	*status = ioaddr_len.odd >> 24 & 0xff;
	*data = __data;
	return cc;
}

int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_load_fh(data, addr, len);

	cc = __pcilg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);

/* PCI Store */
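/*
 * The store wrappers mirror the load path above: __pcistg()/__pcistg_mio()
 * issue the instruction with the same -ENXIO/exception-table handling,
 * __zpci_store() retries on condition code 2, and zpci_store() selects
 * between the MIO and function-handle variants.
 */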
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc = -ENXIO;

	asm volatile (
		" .insn rre,0xb9d00000,%[data],%[req_off]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req_off] "+&d" (req_off.pair)
		: [data] "d" (data)
		: "cc");
	*status = req_off.even >> 24 & 0xff;
	return cc;
}

int __zpci_store(u64 data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcistg(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 's', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 's', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 's', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);

static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}

static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc = -ENXIO;

	asm volatile (
		" .insn rre,0xb9d40000,%[data],%[ioaddr_len]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		: [data] "d" (data)
		: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_store_fh(addr, data, len);

	cc = __pcistg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);

/* PCI Store Block */
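/*
 * PCI store block writes a contiguous block of data pointed to by *data
 * with a single instruction.  The length is encoded in the request for
 * the function-handle form; the MIO form passes it in the len register,
 * which also returns the status byte.
 */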
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		" .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (req)
		: [offset] "d" (offset), [data] "Q" (*data)
		: "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcistb(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'b', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'b', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 'b', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);

static inline int zpci_write_block_fh(volatile void __iomem *dst,
				      const void *src, unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
	u64 offset = ZPCI_OFFSET(dst);

	return __zpci_store_block(src, req, offset);
}

static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		" .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: "cc");
	*status = len >> 24 & 0xff;
	return cc;
}

int zpci_write_block(volatile void __iomem *dst,
		     const void *src, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_write_block_fh(dst, src, len);

	cc = __pcistb_mio(src, (__force u64) dst, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);

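/*
 * PCI write barrier, available only with MIO: orders/flushes previously
 * issued MIO stores.  Without MIO, zpci_barrier() is a no-op, since only
 * the MIO instruction forms need an explicit barrier here.
 */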
static inline void __pciwb_mio(void)
{
	asm volatile (".insn rre,0xb9d50000,0,0\n");
}

void zpci_barrier(void)
{
	if (static_branch_likely(&have_mio))
		__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);