// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

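/*
 * Issue a command to the device through the MMIO control area: write the
 * request payload followed by the request header, then read back the
 * response header and, if it signals success, the response payload.
 */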
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
	memcpy_toio(ism->ctl, req, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	memcpy_fromio(resp, ism->ctl, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
		      resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

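/*
 * Allocate one page of DMA-coherent memory for the SBA (the area through
 * which the device posts DMB and event notifications, see ism_handle_irq())
 * and register it with the device; the buffer is freed again if the command
 * fails.
 */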
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

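/*
 * Allocate one page of DMA-coherent memory for the event queue (IEQ) and
 * register it with the device; entries are consumed by ism_handle_event().
 */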
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

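/*
 * Allocate the DMA buffer backing a DMB. If no sba_idx was requested, pick a
 * free slot from the SBA bitmap; otherwise validate and claim the requested
 * one. The page-aligned length must not exceed the device's maximum DMA
 * segment size.
 */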
static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOMEM;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

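/* Number of bytes that fit between @start and the next @boundary, capped at @len. */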
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

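/*
 * Move data into a remote DMB. The transfer is split so that no single
 * __ism_move() crosses a page boundary; the signalling flag (sf) is only
 * applied to the final chunk.
 */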
static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}

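/*
 * Drain the event queue: deliver every entry between the last processed
 * index and the index written by the device to the SMC-D layer.
 */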
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}

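/*
 * MSI handler: reset the summary indicator, scan and clear the DMB
 * notification bits in the SBA, reset the corresponding DMBE mask byte and
 * forward each hit to the SMC-D layer, then process pending events if the
 * event indicator is set.
 */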
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

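/* Callbacks exposed to the SMC-D core (passed to smcd_alloc_dev()). */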
static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
};

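/*
 * Bring the device up: allocate the MSI vector and IRQ handler, register
 * SBA and IEQ, read the local GID, register with the SMC-D core and query
 * device info. Error paths unwind in reverse order.
 */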
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

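/*
 * PCI probe: enable the function, map the control area from BAR 2, restrict
 * DMA segments to 1 MB, then allocate the smcd device and initialize the
 * ISM device via ism_dev_init().
 */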
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ism->ctl = pci_iomap(pdev, 2, 0);
	if (!ism->ctl) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_unmap;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_unmap:
	pci_iounmap(pdev, ism->ctl);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_iounmap(pdev, ism->ctl);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

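/*
 * Power management: suspend tears the device down completely and resume
 * performs a full re-initialization, including re-registration with the
 * SMC-D core.
 */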
static int ism_suspend(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	ism_dev_exit(ism);
	return 0;
}

static int ism_resume(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	return ism_dev_init(ism);
}

static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
	.driver	  = {
		.pm = &ism_pm_ops,
	},
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);