// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

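/* Issue a command to the ISM device: the request payload is written first
 * and the header last, the response code is preset to ISM_ERROR, then the
 * response header (and, on success, the response payload) is read back.
 */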
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

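/* Allocate a DMA-coherent page for the SBA and register its address with
 * the device; the interrupt handler later scans the DMB bits and DMBE
 * masks kept in this area.
 */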
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

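/* Allocate a DMA-coherent page for the IEQ event queue and register it
 * with the device; ieq_idx starts at -1 so the first consumed entry is
 * index 0.
 */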
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

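/* Unregister the SBA respectively the IEQ from the device and release the
 * DMA memory; a return code of ISM_ERROR is tolerated so teardown can
 * still proceed.
 */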
static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

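/* Read the GID of this ISM device and store it as the local GID of the
 * associated SMC-D device.
 */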
static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

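/* Ask the device whether a remote GID is reachable, optionally qualified
 * by a VLAN id.
 */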
static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

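/* Reserve an SBA slot for the DMB (unless the caller already picked one)
 * and allocate its DMA-coherent buffer; the slot bit is released again if
 * the allocation fails.
 */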
static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN |
					   __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

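/* Allocate a DMB buffer and register it with the device; on success the
 * token returned by the device is stored in dmb->dmb_tok, on failure the
 * buffer is freed again.
 */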
static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

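/* Copy data into a remote DMB in chunks that never cross a page boundary;
 * the signal flag is only applied together with the final chunk.
 */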
static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}

static struct ism_systemeid SYSTEM_EID = {
	.seid_string = "IBM-SYSZ-ISMSEID00000000",
	.serial_number = "0000",
	.type = "0000",
};

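/* Overwrite the "0000" placeholders in SYSTEM_EID with the low 16 bits of
 * the CPU identification and the machine type, each formatted as four hex
 * digits.
 */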
static void ism_create_system_eid(void)
{
	struct cpuid id;
	u16 ident_tail;
	char tmp[5];

	get_cpu_id(&id);
	ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
	snprintf(tmp, 5, "%04X", ident_tail);
	memcpy(&SYSTEM_EID.serial_number, tmp, 4);
	snprintf(tmp, 5, "%04X", id.machine);
	memcpy(&SYSTEM_EID.type, tmp, 4);
}

static u8 *ism_get_system_eid(void)
{
	return SYSTEM_EID.seid_string;
}

static u16 ism_get_chid(struct smcd_dev *smcd)
{
	struct ism_dev *ism = (struct ism_dev *)smcd->priv;

	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

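/* Drain newly arrived entries from the IEQ, wrapping at the end of the
 * entry array, and forward each one to the SMC-D layer.
 */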
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}

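/* MSI handler: clear the summary bit, scan the DMB bit vector in the SBA,
 * forward each pending DMBE mask to the SMC-D layer, and finally process
 * IEQ events if the event bit is set.
 */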
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
	.get_system_eid = ism_get_system_eid,
	.get_chid = ism_get_chid,
};

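/* Set up the MSI interrupt, register SBA and IEQ, read the local GID,
 * probe for V2 capability via the reserved VLAN id, and register the
 * device with the SMC-D layer.
 */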
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_create_system_eid();

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

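/* PCI probe: enable the function, claim its memory regions, configure DMA
 * (64-bit mask, 1 MB segment limit), allocate the SMC-D device with
 * ISM_NR_DMBS slots, and initialize the ISM device.
 */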
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}

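/* Tear down in reverse order of ism_dev_init; the reserved VLAN id is only
 * removed if a system EID was created, i.e. the device is V2 capable.
 */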
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	if (SYSTEM_EID.serial_number[0] != '0' ||
	    SYSTEM_EID.type[0] != '0')
		ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
};

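/* Module init: register the s390 debug feature for the driver, then the
 * PCI driver itself.
 */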
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);