xref: /openbmc/linux/drivers/s390/net/ism_drv.c (revision fb8d6c8d)
// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
#include <net/smc.h>

#include <asm/debug.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;

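/*
 * Issue a command against the device's command area via the
 * __ism_write_cmd()/__ism_read_cmd() helpers from ism.h: the request
 * payload is written first, then the header (which presumably kicks off
 * execution).  resp->ret is preset to ISM_ERROR so a response that never
 * arrives is not mistaken for success; the response payload is only read
 * back when the response header reports success.
 */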
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

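/*
 * Fetch device attributes via ISM_QUERY_INFO.  The response is only
 * logged to the s390 debug feature; the function always returns 0, so a
 * failing query does not abort device initialization.
 */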
static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

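/*
 * Allocate one page of DMA-coherent memory for the SBA (the per-device
 * notification block that ism_handle_irq() scans) and hand its bus
 * address to the device with ISM_REG_SBA.
 */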
static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

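/*
 * Allocate and register the IEQ, the event queue the device fills with
 * smcd_event entries and that ism_handle_event() later drains.  ieq_idx
 * is initialized to -1, i.e. nothing has been consumed yet.
 */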
static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

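/*
 * Tear-down counterparts of the register_* helpers: tell the device to
 * drop the SBA/IEQ and free the backing page.  An ISM_ERROR response is
 * tolerated here, so the memory is still released even if the device
 * rejects the command (presumably because it is already gone).
 */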
static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->smcd->local_gid = cmd.response.gid;
out:
	return ret;
}

static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	struct ism_dev *ism = smcd->priv;
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

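/*
 * Allocate backing storage for a DMB.  If the caller did not ask for a
 * specific sba_idx, the first free slot above ISM_DMB_BIT_OFFSET is
 * claimed in sba_bitmap; indices below that offset are reserved.  The
 * buffer must also fit within the 1 MB DMA segment limit configured in
 * ism_probe().
 */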
static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOMEM;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

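/*
 * smcd_ops callbacks: allocate a DMB and register it with the device.
 * On success the device returns a token (dmb_tok) that identifies the
 * buffer in later move and unregister requests; on failure the buffer is
 * freed again.
 */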
static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_reg_dmb cmd;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
out:
	return ret;
}

static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	struct ism_dev *ism = smcd->priv;
	union ism_unreg_dmb cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}

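/*
 * VLAN handling callbacks: add or remove a VLAN id at the device, and
 * switch VLAN-required mode on or off via the simple SET/RESET_VLAN
 * commands further below.
 */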
static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	struct ism_dev *ism = smcd->priv;
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int ism_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	struct ism_dev *ism = smcd->priv;
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

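/* Number of bytes, at most len, from start up to the next multiple of boundary (a power of two). */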
static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

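/*
 * smcd_ops callback that copies size bytes into the target DMB at the
 * given offset.  The copy is split into chunks that never cross a page
 * boundary, and the signal flag sf is only passed along with the final
 * chunk.
 */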
static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		    bool sf, unsigned int offset, void *data, unsigned int size)
{
	struct ism_dev *ism = smcd->priv;
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}

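/*
 * Drain new entries from the IEQ: advance the local read index until it
 * catches up with the index published by the device in the queue header,
 * handing each entry to the SMC-D layer.
 */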
static void ism_handle_event(struct ism_dev *ism)
{
	struct smcd_event *entry;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		smcd_handle_event(ism->smcd, entry);
	}
}

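/*
 * MSI handler.  Under ism->lock: clear the summary indicator, scan the
 * DMB notification bits in the SBA (the reserved low indices are
 * skipped), clear each bit and its dmbe_mask entry before forwarding the
 * DMB index to smcd_handle_irq(), then process queued events if the
 * event indicator e is set.
 */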
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = ism_query_rgid,
	.register_dmb = ism_register_dmb,
	.unregister_dmb = ism_unregister_dmb,
	.add_vlan_id = ism_add_vlan_id,
	.del_vlan_id = ism_del_vlan_id,
	.set_vlan_required = ism_set_vlan_required,
	.reset_vlan_required = ism_reset_vlan_required,
	.signal_event = ism_signal_ieq,
	.move_data = ism_move,
};

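/*
 * Bring the device up: one MSI vector, the interrupt handler, SBA and
 * IEQ registration, reading the local GID, and finally registration with
 * the SMC-D core.  Errors unwind in reverse order; query_info() at the
 * end is purely informational.
 */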
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_vectors;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	ret = smcd_register_dev(ism->smcd);
	if (ret)
		goto unreg_ieq;

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

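/*
 * PCI probe: enable the function, claim its memory regions, allow 64-bit
 * DMA with 1 MB segments (the DMB size limit checked in ism_alloc_dmb()),
 * allocate the smcd device with room for ISM_NR_DMBS buffers, and run
 * ism_dev_init().
 */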
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
				   ISM_NR_DMBS);
	if (!ism->smcd) {
		ret = -ENOMEM;
		goto err_resource;
	}

	ism->smcd->priv = ism;
	ret = ism_dev_init(ism);
	if (ret)
		goto err_free;

	return 0;

err_free:
	smcd_free_dev(ism->smcd);
err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	kfree(ism);
	dev_set_drvdata(&pdev->dev, NULL);
	return ret;
}

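/*
 * Reverse of ism_dev_init(), shared by remove and suspend: unregister
 * from the SMC-D core, drop the IEQ and SBA, and release the interrupt
 * resources.
 */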
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;

	smcd_unregister_dev(ism->smcd);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	pci_free_irq_vectors(pdev);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	smcd_free_dev(ism->smcd);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static int ism_suspend(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	ism_dev_exit(ism);
	return 0;
}

static int ism_resume(struct device *dev)
{
	struct ism_dev *ism = dev_get_drvdata(dev);

	return ism_dev_init(ism);
}

static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume);

static struct pci_driver ism_driver = {
	.name	  = DRV_NAME,
	.id_table = ism_device_table,
	.probe	  = ism_probe,
	.remove	  = ism_remove,
	.driver	  = {
		.pm = &ism_pm_ops,
	},
};

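/*
 * Module init/exit: set up the "ism" s390 debug feature and its
 * hex/ascii view before registering the PCI driver, and tear it down
 * again if registration fails.
 */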
static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);