// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

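/*
 * Issue a command to the device: the request payload is copied into the
 * MMIO control area first, followed by the request header. The response
 * header is then read back; only on success is the response payload
 * copied out as well.
 */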
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
	memcpy_toio(ism->ctl, req, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	memcpy_fromio(resp, ism->ctl, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
		      resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

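/*
 * Allocate one page of coherent DMA memory for the system buffer area (SBA)
 * and register it with the device; the page is freed again if the register
 * command fails.
 */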
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

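/*
 * Allocate and register the interrupt event queue (IEQ). The driver-side
 * read index starts at -1, i.e. no entries consumed yet.
 */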
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	if (!ism->sba)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_SBA))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	if (!ism->ieq)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

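/*
 * Reserve an index in the SBA bitmap (either the caller-requested sba_idx or
 * the next free one) and allocate coherent DMA memory for the DMB. The GFP
 * flags suppress allocation warnings and avoid emergency reserves and hard
 * retries; a failed allocation is simply reported as -ENOMEM.
 */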
static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOMEM;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

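/* Number of bytes that can be copied from @start without crossing @boundary. */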
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

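/*
 * Copy data into a remote DMB in chunks that never cross a page boundary;
 * the signalling flag is only passed through on the final chunk.
 */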
static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}

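/*
 * Walk the interrupt event queue from the last processed index up to the
 * device's write index and hand each entry to the SMC-D core.
 */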
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}

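/*
 * MSI handler: clear the summary indicator, scan the DMB bit vector in the
 * SBA and notify the SMC-D core for each pending DMB, then process queued
 * events if the event indicator is set.
 */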
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

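/* Callbacks exposed to the SMC-D protocol layer via smcd_alloc_dev(). */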
static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
};

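/*
 * Bring the device up: allocate an MSI vector, request the interrupt,
 * register SBA and IEQ, read the local GID, and register with the SMC-D
 * core. Errors unwind in reverse order.
 */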
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

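/*
 * PCI probe: enable the device, map BAR 2 as the command/control area, set up
 * DMA constraints, allocate the SMC-D device, and initialize the hardware.
 */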
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ism->ctl = pci_iomap(pdev, 2, 0);
	if (!ism->ctl) {
		ret = -ENODEV;
		goto err_resource;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_unmap;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_unmap:
	pci_iounmap(pdev, ism->ctl);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_iounmap(pdev, ism->ctl);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

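/* Power management: tear the device down on suspend and rebuild it on resume. */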
static int ism_suspend(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	ism_dev_exit(ism);
	return 0;
}

static int ism_resume(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	return ism_dev_init(ism);
}

static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
	.driver	  = {
		.pm = &ism_pm_ops,
	},
};

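/*
 * Register the s390 debug feature area used by the debug_*_event() calls
 * above before registering the PCI driver.
 */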
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);