// SPDX-License-Identifier: GPL-2.0
/*
 * Microsemi Switchtec(tm) PCIe Management Driver
 * Copyright (c) 2017, Microsemi Corporation
 */

#include <linux/switchtec.h>
#include <linux/switchtec_ioctl.h>

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/nospec.h>

MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static bool use_dma_mrpc = true;
module_param(use_dma_mrpc, bool, 0644);
MODULE_PARM_DESC(use_dma_mrpc,
		 "Enable the use of the DMA MRPC feature");

static dev_t switchtec_devt;
static DEFINE_IDA(switchtec_minor_ida);

struct class *switchtec_class;
EXPORT_SYMBOL_GPL(switchtec_class);

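/*
 * MRPC (Management Remote Procedure Call) commands submitted by user
 * space move through a small state machine: IDLE -> QUEUED (written and
 * placed on mrpc_queue) -> RUNNING (handed to the hardware) -> DONE
 * (status and output captured), then back to IDLE once the result has
 * been read back.
 */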
enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};

struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;

	struct completion comp;
	struct kref kref;
	struct list_head list;

	u32 cmd;
	u32 status;
	u32 return_code;
	size_t data_len;
	size_t read_len;
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;
};
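
/*
 * Lifetime note: stuser_create() starts the kref at one reference, owned
 * by the open file handle. mrpc_queue_cmd() takes an extra reference for
 * as long as the request sits on mrpc_queue, and mrpc_complete_cmd() (or
 * stdev_kill()) drops it again, so a user that closes the device while a
 * command is in flight is still torn down safely.
 */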

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s\n",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * The odb (outbound doorbell) register is processed by low
	 * latency hardware and reading it has no side effects, so it is
	 * safe to read here purely to flush the write-combining buffers.
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
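
/*
 * Ordering matters in mrpc_cmd_submit(): the payload goes out through
 * the write-combining MRPC mapping, flush_wc_buf() pushes it to the
 * device, and only then does the write to the cmd register start
 * execution. The delayed work re-polls the status every 500 ms so a
 * command still completes even if the completion interrupt is lost.
 */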

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
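
/*
 * With the optional DMA MRPC feature enabled, the firmware writes the
 * status, return code and output data into the coherent host buffer
 * stdev->dma_mrpc, so the completion path above can avoid the slower
 * MMIO register reads; the two branches are otherwise equivalent.
 */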

static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);
out:
	mutex_unlock(&stdev->mrpc_mutex);
}

static ssize_t device_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			    sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);

static ssize_t component_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
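
/*
 * These read-only attributes are attached to the device created in
 * stdev_create(), so they should appear under
 * /sys/class/switchtec/switchtec<N>/ once the device is registered.
 */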

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	stream_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
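
/*
 * Illustrative user-space sketch (not part of the driver; the command
 * number 0x1234 and the payload sizes are made up): write() supplies the
 * u32 command number followed by the input payload, and a subsequent
 * read() returns a u32 return code followed by the output data.
 *
 *	int fd = open("/dev/switchtec0", O_RDWR);
 *	struct { uint32_t cmd; uint8_t payload[4]; } in = { 0x1234, {0} };
 *	struct { uint32_t ret; uint8_t output[64]; } out;
 *
 *	write(fd, &in, sizeof(in));	// queue the MRPC command
 *	read(fd, &out, sizeof(out));	// blocks until it completes
 */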

static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (try_wait_for_completion(&stuser->comp))
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}

static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	info.flash_length = ioread32(&fi->flash_length);
	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
	struct switchtec_user *stuser,
	struct switchtec_ioctl_event_summary __user *usum,
	size_t size)
{
	struct switchtec_ioctl_event_summary *s;
	int i;
	u32 reg;
	int ret = 0;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
	s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s->part[i] = reg;
	}

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;

		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s->pff[i] = reg;
	}

	if (copy_to_user(usum, s, size)) {
		ret = -EFAULT;
		goto error_case;
	}

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

error_case:
	kfree(s);
	return ret;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}

#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};
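
/*
 * Each ioctl event id maps to a (register offset, lookup function) pair:
 * the offset locates the event header within its register block, and
 * map_reg resolves the block instance itself (the single global block,
 * a per-partition config block, or a per-PFF CSR block) for the given
 * index.
 */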

static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}

static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}

static int ioctl_event_ctl(struct switchtec_dev *stdev,
	struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}

static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id);
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id);
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary_legacy));
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary));
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);
	return rc;
}

static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};
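
/*
 * The same handler serves .compat_ioctl: the switchtec ioctl structures
 * appear to use only fixed-width fields with no pointers or longs, so
 * 32-bit and 64-bit user space should see identical layouts.
 */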

static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}

static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}

static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
	}
	kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
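
/*
 * The struct device embedded in switchtec_dev owns the object's
 * lifetime: stdev is only freed by stdev_release() when the last
 * reference goes away, which is why the error paths here and in
 * switchtec_pci_probe() use put_device() rather than kfree().
 */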

static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
	    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	irqreturn_t ret = IRQ_NONE;

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	schedule_work(&stdev->mrpc_work);

	ret = IRQ_HANDLED;
	return ret;
}

static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
				switchtec_event_isr, 0,
				KBUILD_MODNAME, stdev);

	if (rc)
		return rc;

	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
				switchtec_dma_mrpc_isr, 0,
				KBUILD_MODNAME, stdev);

	return rc;
}
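
/*
 * The hardware reports which interrupt vector it will use for each
 * purpose: vep_vector_number selects the event interrupt and dma_vector
 * the DMA MRPC completion interrupt, so the driver requests exactly
 * those vectors rather than assuming fixed ones.
 */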

static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}

static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (!stdev->dma_mrpc)
		return -ENOMEM;

	return 0;
}
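
/*
 * BAR 0 is mapped in two pieces: the MRPC region gets a write-combining
 * mapping (devm_ioremap_wc) so the payload copy in mrpc_cmd_submit() can
 * be burst out efficiently, while the remaining GAS register space gets
 * an ordinary uncached mapping.
 */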

static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  //PFX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8532),  //PFX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8533),  //PFX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8534),  //PFX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8535),  //PFX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8536),  //PFX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8541),  //PSX 24xG3
	SWITCHTEC_PCI_DEVICE(0x8542),  //PSX 32xG3
	SWITCHTEC_PCI_DEVICE(0x8543),  //PSX 48xG3
	SWITCHTEC_PCI_DEVICE(0x8544),  //PSX 64xG3
	SWITCHTEC_PCI_DEVICE(0x8545),  //PSX 80xG3
	SWITCHTEC_PCI_DEVICE(0x8546),  //PSX 96xG3
	SWITCHTEC_PCI_DEVICE(0x8551),  //PAX 24XG3
	SWITCHTEC_PCI_DEVICE(0x8552),  //PAX 32XG3
	SWITCHTEC_PCI_DEVICE(0x8553),  //PAX 48XG3
	SWITCHTEC_PCI_DEVICE(0x8554),  //PAX 64XG3
	SWITCHTEC_PCI_DEVICE(0x8555),  //PAX 80XG3
	SWITCHTEC_PCI_DEVICE(0x8556),  //PAX 96XG3
	SWITCHTEC_PCI_DEVICE(0x8561),  //PFXL 24XG3
	SWITCHTEC_PCI_DEVICE(0x8562),  //PFXL 32XG3
	SWITCHTEC_PCI_DEVICE(0x8563),  //PFXL 48XG3
	SWITCHTEC_PCI_DEVICE(0x8564),  //PFXL 64XG3
	SWITCHTEC_PCI_DEVICE(0x8565),  //PFXL 80XG3
	SWITCHTEC_PCI_DEVICE(0x8566),  //PFXL 96XG3
	SWITCHTEC_PCI_DEVICE(0x8571),  //PFXI 24XG3
	SWITCHTEC_PCI_DEVICE(0x8572),  //PFXI 32XG3
	SWITCHTEC_PCI_DEVICE(0x8573),  //PFXI 48XG3
	SWITCHTEC_PCI_DEVICE(0x8574),  //PFXI 64XG3
	SWITCHTEC_PCI_DEVICE(0x8575),  //PFXI 80XG3
	SWITCHTEC_PCI_DEVICE(0x8576),  //PFXI 96XG3
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);