xref: /openbmc/linux/arch/s390/pci/pci_clp.c (revision 95db3b25)
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "zpci"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <asm/pci_debug.h>
#include <asm/pci_clp.h>
#include <asm/compat.h>
#include <asm/clp.h>
#include <uapi/asm/clp.h>

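/* Dump one packed record with the CLP response code and the return code */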
static inline void zpci_err_clp(unsigned int rsp, int rc)
{
	struct {
		unsigned int rsp;
		int rc;
	} __packed data = {rsp, rc};

	zpci_err_hex(&data, sizeof(data));
}

/*
 * Call Logical Processor with c=1, lps=0 and command 1
 * to get the bit mask of installed logical processors
 */
static inline int clp_get_ilp(unsigned long *ilp)
{
	unsigned long mask;
	int cc = 3;

	asm volatile (
		"	.insn	rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
		: "cc");
	*ilp = mask;
	return cc;
}

/*
 * Call Logical Processor with c=0, the given constant lps and an lpcb request.
 */
static inline int clp_req(void *data, unsigned int lps)
{
	struct { u8 _[CLP_BLK_SIZE]; } *req = data;
	u64 ignored;
	int cc = 3;

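	/*
	 * cc stays at the preset 3 if the CLP instruction faults and the
	 * EX_TABLE fixup branches past the ipm below.
	 */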
	asm volatile (
		"	.insn	rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
		"0:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
		: [req] "a" (req), [lps] "i" (lps)
		: "cc");
	return cc;
}

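/* CLP request/response blocks are CLP_BLK_SIZE bytes, backed by whole pages */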
static void *clp_alloc_block(gfp_t gfp_mask)
{
	return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}

static void clp_free_block(void *ptr)
{
	free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
}

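/* Copy the function group attributes from a successful query into @zdev */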
static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
				      struct clp_rsp_query_pci_grp *response)
{
	zdev->tlb_refresh = response->refresh;
	zdev->dma_mask = response->dasm;
	zdev->msi_addr = response->msia;
	zdev->max_msi = response->noi;
	zdev->fmb_update = response->mui;

	switch (response->version) {
	case 1:
		zdev->max_bus_speed = PCIE_SPEED_5_0GT;
		break;
	default:
		zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
		break;
	}
}

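/* Query the PCI function group @pfgid and store its attributes in @zdev */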
static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
{
	struct clp_req_rsp_query_pci_grp *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.pfgid = pfgid;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		clp_store_query_pci_fngrp(zdev, &rrb->response);
	else {
		zpci_err("Q PCI FGRP:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

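/* Copy BARs, the DMA range and the identifiers from a query response to @zdev */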
static int clp_store_query_pci_fn(struct zpci_dev *zdev,
				  struct clp_rsp_query_pci *response)
{
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		zdev->bars[i].val = le32_to_cpu(response->bar[i]);
		zdev->bars[i].size = response->bar_size[i];
	}
	zdev->start_dma = response->sdma;
	zdev->end_dma = response->edma;
	zdev->pchid = response->pchid;
	zdev->pfgid = response->pfgid;
	zdev->pft = response->pft;
	zdev->vfn = response->vfn;
	zdev->uid = response->uid;

	memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
	if (response->util_str_avail) {
		memcpy(zdev->util_str, response->util_str,
		       sizeof(zdev->util_str));
	}

	return 0;
}

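/*
 * Query the PCI function addressed by the handle @fh and, on success,
 * also query the function group it belongs to.
 */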
static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
{
	struct clp_req_rsp_query_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	memset(rrb, 0, sizeof(*rrb));
	rrb->request.hdr.len = sizeof(rrb->request);
	rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
	rrb->response.hdr.len = sizeof(rrb->response);
	rrb->request.fh = fh;

	rc = clp_req(rrb, CLP_LPS_PCI);
	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
		rc = clp_store_query_pci_fn(zdev, &rrb->response);
		if (rc)
			goto out;
		rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
	} else {
		zpci_err("Q PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
out:
	clp_free_block(rrb);
	return rc;
}

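/*
 * Allocate a zpci_dev for the function identified by @fid/@fh, query its
 * properties and register it with the zPCI core.
 */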
int clp_add_pci_device(u32 fid, u32 fh, int configured)
{
	struct zpci_dev *zdev;
	int rc;

	zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, configured);
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return -ENOMEM;

	zdev->fh = fh;
	zdev->fid = fid;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev, fh);
	if (rc)
		goto error;

	if (configured)
		zdev->state = ZPCI_FN_STATE_CONFIGURED;
	else
		zdev->state = ZPCI_FN_STATE_STANDBY;

	rc = zpci_create_device(zdev);
	if (rc)
		goto error;
	return 0;

error:
	kfree(zdev);
	return rc;
}

/*
 * Enable/Disable a given PCI function defined by its function handle.
 */
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
	struct clp_req_rsp_set_pci *rrb;
	int rc, retries = 100;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

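	/* Retry for up to two seconds (100 * 20 msec) while the function is busy */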
	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_SET_PCI_FN;
		rrb->response.hdr.len = sizeof(rrb->response);
		rrb->request.fh = *fh;
		rrb->request.oc = command;
		rrb->request.ndas = nr_dma_as;

		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
			retries--;
			if (retries < 0)
				break;
			msleep(20);
		}
	} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);

	if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
		*fh = rrb->response.fh;
	else {
		zpci_err("Set PCI FN:\n");
		zpci_err_clp(rrb->response.hdr.rsp, rc);
		rc = -EIO;
	}
	clp_free_block(rrb);
	return rc;
}

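/* Enable the PCI function @zdev with @nr_dma_as DMA address spaces */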
int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
{
	u32 fh = zdev->fh;
	int rc;

	rc = clp_set_pci_fn(&fh, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
	if (!rc)
		/* Success -> store enabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

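/* Disable the PCI function @zdev; a no-op if it is not currently enabled */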
int clp_disable_fh(struct zpci_dev *zdev)
{
	u32 fh = zdev->fh;
	int rc;

	if (!zdev_enabled(zdev))
		return 0;

	rc = clp_set_pci_fn(&fh, 0, CLP_SET_DISABLE_PCI_FN);
	if (!rc)
		/* Success -> store disabled handle in zdev */
		zdev->fh = fh;

	zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
	return rc;
}

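/*
 * Walk the complete list of PCI functions, following the resume token
 * across successive CLP requests, and run @cb on every list entry.
 */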
static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
			void (*cb)(struct clp_fh_list_entry *entry))
{
	u64 resume_token = 0;
	int entries, i, rc;

	do {
		memset(rrb, 0, sizeof(*rrb));
		rrb->request.hdr.len = sizeof(rrb->request);
		rrb->request.hdr.cmd = CLP_LIST_PCI;
		/* store as many entries as possible */
		rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
		rrb->request.resume_token = resume_token;

		/* Get PCI function handle list */
		rc = clp_req(rrb, CLP_LPS_PCI);
		if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
			zpci_err("List PCI FN:\n");
			zpci_err_clp(rrb->response.hdr.rsp, rc);
			rc = -EIO;
			goto out;
		}

		WARN_ON_ONCE(rrb->response.entry_size !=
			sizeof(struct clp_fh_list_entry));

		entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
			rrb->response.entry_size;

		resume_token = rrb->response.resume_token;
		for (i = 0; i < entries; i++)
			cb(&rrb->response.fh_list[i]);
	} while (resume_token);
out:
	return rc;
}

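/* List callback: add a new zpci device for every entry with a vendor ID */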
static void __clp_add(struct clp_fh_list_entry *entry)
{
	if (!entry->vendor_id)
		return;

	clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
}

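/* List callback: add yet unknown functions, stop devices that were deconfigured */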
static void __clp_rescan(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev) {
		clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
		return;
	}

	if (!entry->config_state) {
		/*
		 * The handle is already disabled, so iota/irq resources can
		 * no longer be freed via the firmware interfaces. Free the
		 * remaining resources manually (DMA memory, debug, sysfs)...
		 */
		zpci_stop_device(zdev);
	}
}

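/* List callback: refresh the cached function handle of a known device */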
static void __clp_update(struct clp_fh_list_entry *entry)
{
	struct zpci_dev *zdev;

	if (!entry->vendor_id)
		return;

	zdev = get_zdev_by_fid(entry->fid);
	if (!zdev)
		return;

	zdev->fh = entry->fh;
}

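/* Initial scan: add a zpci device for every function that firmware reports */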
int clp_scan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_add);

	clp_free_block(rrb);
	return rc;
}

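/* Full rescan: reconcile the zpci device list with the current firmware view */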
int clp_rescan_pci_devices(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_KERNEL);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_rescan);

	clp_free_block(rrb);
	return rc;
}

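/*
 * Lightweight rescan that only refreshes the cached function handles.
 * Allocates with GFP_NOWAIT, presumably so that it can also be called
 * from contexts that must not sleep.
 */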
int clp_rescan_pci_devices_simple(void)
{
	struct clp_req_rsp_list_pci *rrb;
	int rc;

	rrb = clp_alloc_block(GFP_NOWAIT);
	if (!rrb)
		return -ENOMEM;

	rc = clp_list_pci(rrb, __clp_update);

	clp_free_block(rrb);
	return rc;
}

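/*
 * The helpers below sanity-check CLP command blocks handed in from user
 * space before they are passed on to firmware.
 */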
static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
}

static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_base_slpc(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query(struct clp_req *req,
			 struct clp_req_rsp_query_pci *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_query_grp(struct clp_req *req,
			     struct clp_req_rsp_query_pci_grp *lpcb)
{
	unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);

	if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
	    lpcb->response.hdr.len > limit)
		return -EINVAL;
	if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
	    lpcb->request.reserved4 != 0)
		return -EINVAL;
	return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
}

static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
{
	switch (lpcb->cmd) {
	case 0x0001: /* store logical-processor characteristics */
		return clp_pci_slpc(req, (void *) lpcb);
	case 0x0002: /* list PCI functions */
		return clp_pci_list(req, (void *) lpcb);
	case 0x0003: /* query PCI function */
		return clp_pci_query(req, (void *) lpcb);
	case 0x0004: /* query PCI function group */
		return clp_pci_query_grp(req, (void *) lpcb);
	default:
		return -EINVAL;
	}
}

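/* Copy a user-supplied command block in, run the CLP request, copy it back */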
static int clp_normal_command(struct clp_req *req)
{
	struct clp_req_hdr *lpcb;
	void __user *uptr;
	int rc;

	rc = -EINVAL;
	if (req->lps != 0 && req->lps != 2)
		goto out;

	rc = -ENOMEM;
	lpcb = clp_alloc_block(GFP_KERNEL);
	if (!lpcb)
		goto out;

	rc = -EFAULT;
	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
		goto out_free;

	rc = -EINVAL;
	if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
		goto out_free;

	switch (req->lps) {
	case 0:
		rc = clp_base_command(req, lpcb);
		break;
	case 2:
		rc = clp_pci_command(req, lpcb);
		break;
	}
	if (rc)
		goto out_free;

	rc = -EFAULT;
	if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
		goto out_free;

	rc = 0;

out_free:
	clp_free_block(lpcb);
out:
	return rc;
}

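/*
 * Immediate commands do not take a command block: cmd 0 tests for one
 * specific logical processor, cmd 1 returns the full installed-LP mask.
 */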
static int clp_immediate_command(struct clp_req *req)
{
	void __user *uptr;
	unsigned long ilp;
	int exists;

	if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
		return -EINVAL;

	uptr = (void __force __user *)(unsigned long) req->data_p;
	if (req->cmd == 0) {
		/* Command code 0: test for a specific processor */
		exists = test_bit_inv(req->lps, &ilp);
		return put_user(exists, (int __user *) uptr);
	}
	/* Command code 1: return bit mask of installed processors */
	return put_user(ilp, (unsigned long __user *) uptr);
}

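/* Entry point for the CLP_SYNC ioctl: dispatch immediate vs. normal commands */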
static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
			   unsigned long arg)
{
	struct clp_req req;
	void __user *argp;

	if (cmd != CLP_SYNC)
		return -EINVAL;

	argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;
	if (req.r != 0)
		return -EINVAL;
	return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
}

static int clp_misc_release(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations clp_misc_fops = {
	.owner = THIS_MODULE,
	.open = nonseekable_open,
	.release = clp_misc_release,
	.unlocked_ioctl = clp_misc_ioctl,
	.compat_ioctl = clp_misc_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice clp_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "clp",
	.fops = &clp_misc_fops,
};

static int __init clp_misc_init(void)
{
	return misc_register(&clp_misc_device);
}

device_initcall(clp_misc_init);
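
/*
 * Illustrative user-space sketch (not part of this file): the misc device
 * registered above is typically exposed as /dev/clp.  Assuming that node,
 * an immediate command with c=1 and cmd=1 retrieves the installed-LP mask
 * through the CLP_SYNC ioctl:
 *
 *	unsigned long mask = 0;
 *	struct clp_req req = {
 *		.c = 1,					// immediate command
 *		.cmd = 1,				// return the ILP bit mask
 *		.data_p = (unsigned long) &mask,	// user result buffer
 *	};
 *	int fd = open("/dev/clp", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, CLP_SYNC, &req) == 0)
 *		printf("installed LPs: %lx\n", mask);
 */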