xref: /openbmc/linux/drivers/s390/net/ism_drv.c (revision ccb01374)
// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

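/*
 * Issue a command to the ISM device: the request payload is copied into the
 * mapped control area first, then the request header.  The response header
 * is read back; on success the response payload follows.
 */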
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req));
	memcpy_toio(ism->ctl, req, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	memcpy_fromio(resp, ism->ctl, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp),
		      resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

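/*
 * register_sba()/register_ieq() allocate one DMA-coherent page each for the
 * SBA and the IEQ and register them with the device; the unregister helpers
 * undo the registration and free the pages.
 */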
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE,
				  &dma_handle, GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	if (!ism->sba)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_SBA))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	if (!ism->ieq)
		return 0;

	if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

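/*
 * DMB handling: ism_alloc_dmb() reserves a slot in the SBA bitmap (unless the
 * caller already picked one) and allocates a DMA-coherent buffer for the DMB;
 * ism_free_dmb() releases both.
 */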
static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOMEM;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					    &dmb->dma_addr, GFP_KERNEL |
					    __GFP_NOWARN | __GFP_NOMEMALLOC |
					    __GFP_COMP | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

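/*
 * ism_move() copies data into a remote DMB in chunks that never cross a page
 * boundary; the signal flag (sf) is only applied to the final chunk.
 */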
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}

static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}

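/*
 * MSI handler: clear the global summary bit, scan the DMB bit vector and
 * forward each signalled DMB to the SMC-D layer, then drain the event queue
 * if the event summary bit (sba->e) is set.
 */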
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
};

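/*
 * ism_dev_init() allocates the MSI vector, registers the SBA and IEQ with
 * the device, reads the local GID and registers the SMC-D device.  It is
 * also called again from ism_resume().
 */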
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

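/*
 * PCI probe: enable the device, map the control area (BAR 2), set up the DMA
 * parameters, allocate the SMC-D device and bring the function online via
 * ism_dev_init().
 */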
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ism->ctl = pci_iomap(pdev, 2, 0);
	if (!ism->ctl) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_unmap;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_unmap:
	pci_iounmap(pdev, ism->ctl);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}

static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_iounmap(pdev, ism->ctl);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static int ism_suspend(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	ism_dev_exit(ism);
	return 0;
}

static int ism_resume(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	return ism_dev_init(ism);
}

static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
	.driver	  = {
		.pm = &ism_pm_ops,
	},
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);