xref: /openbmc/linux/drivers/pci/switch/switchtec.c (revision 6b5fc336)
1 /*
2  * Microsemi Switchtec(tm) PCIe Management Driver
3  * Copyright (c) 2017, Microsemi Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  */
15 
16 #include <linux/switchtec_ioctl.h>
17 
18 #include <linux/interrupt.h>
19 #include <linux/module.h>
20 #include <linux/fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/poll.h>
23 #include <linux/pci.h>
24 #include <linux/cdev.h>
25 #include <linux/wait.h>
26 
MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Microsemi Corporation");

/*
 * Upper bound on switchtec device instances; presumably used to size the
 * char-device region at module init (init code not in view) — writable
 * through sysfs (0644).
 */
static int max_devices = 16;
module_param(max_devices, int, 0644);
MODULE_PARM_DESC(max_devices, "max number of switchtec device instances");

static dev_t switchtec_devt;			/* base dev_t for char devices */
static struct class *switchtec_class;		/* class for /dev node creation */
static DEFINE_IDA(switchtec_minor_ida);		/* per-device minor allocator */
39 
/* PCI vendor ID and class codes identifying Switchtec functions */
#define MICROSEMI_VENDOR_ID         0x11f8
#define MICROSEMI_NTB_CLASSCODE     0x068000
#define MICROSEMI_MGMT_CLASSCODE    0x058000

/* Fixed size of the MRPC input/output payload buffers in the GAS */
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
/* Maximum number of PFF CSR register blocks scanned per device */
#define SWITCHTEC_MAX_PFF_CSR 48

/*
 * Bits of the per-event header registers; note OCCURRED and CLEAR share
 * bit 0 (reading reports occurrence, writing a 1 clears it).
 */
#define SWITCHTEC_EVENT_OCCURRED BIT(0)
#define SWITCHTEC_EVENT_CLEAR    BIT(0)
#define SWITCHTEC_EVENT_EN_LOG   BIT(1)
#define SWITCHTEC_EVENT_EN_CLI   BIT(2)
#define SWITCHTEC_EVENT_EN_IRQ   BIT(3)
#define SWITCHTEC_EVENT_FATAL    BIT(4)
53 
/*
 * Offsets of the register sub-blocks within the device's Global Address
 * Space (GAS) BAR.
 */
enum {
	SWITCHTEC_GAS_MRPC_OFFSET       = 0x0000,
	SWITCHTEC_GAS_TOP_CFG_OFFSET    = 0x1000,
	SWITCHTEC_GAS_SW_EVENT_OFFSET   = 0x1800,
	SWITCHTEC_GAS_SYS_INFO_OFFSET   = 0x2000,
	SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200,
	SWITCHTEC_GAS_PART_CFG_OFFSET   = 0x4000,
	SWITCHTEC_GAS_NTB_OFFSET        = 0x10000,
	SWITCHTEC_GAS_PFF_CSR_OFFSET    = 0x134000,
};
64 
/*
 * MRPC mailbox register layout (mirrors hardware, hence __packed).
 * mrpc_cmd_submit() fills input_data, then writes cmd to kick off
 * execution; completion is read back via status/ret_value/output_data.
 */
struct mrpc_regs {
	u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	u32 cmd;
	u32 status;	/* holds an enum mrpc_status value */
	u32 ret_value;
} __packed;
72 
/* Values reported by mrpc_regs.status */
enum mrpc_status {
	SWITCHTEC_MRPC_STATUS_INPROGRESS = 1,
	SWITCHTEC_MRPC_STATUS_DONE = 2,
	SWITCHTEC_MRPC_STATUS_ERROR = 0xFF,
	SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100,
};
79 
/*
 * Global (switch-wide) event register block at SWITCHTEC_GAS_SW_EVENT_OFFSET.
 * Each *_hdr register follows the SWITCHTEC_EVENT_* bit layout; layout
 * mirrors hardware, hence __packed.
 */
struct sw_event_regs {
	u64 event_report_ctrl;
	u64 reserved1;
	u64 part_event_bitmap;
	u64 reserved2;
	u32 global_summary;
	u32 reserved3[3];
	u32 stack_error_event_hdr;
	u32 stack_error_event_data;
	u32 reserved4[4];
	u32 ppu_error_event_hdr;
	u32 ppu_error_event_data;
	u32 reserved5[4];
	u32 isp_error_event_hdr;
	u32 isp_error_event_data;
	u32 reserved6[4];
	u32 sys_reset_event_hdr;
	u32 reserved7[5];
	u32 fw_exception_hdr;
	u32 reserved8[5];
	u32 fw_nmi_hdr;
	u32 reserved9[5];
	u32 fw_non_fatal_hdr;
	u32 reserved10[5];
	u32 fw_fatal_hdr;
	u32 reserved11[5];
	u32 twi_mrpc_comp_hdr;
	u32 twi_mrpc_comp_data;
	u32 reserved12[4];
	u32 twi_mrpc_comp_async_hdr;
	u32 twi_mrpc_comp_async_data;
	u32 reserved13[4];
	u32 cli_mrpc_comp_hdr;
	u32 cli_mrpc_comp_data;
	u32 reserved14[4];
	u32 cli_mrpc_comp_async_hdr;
	u32 cli_mrpc_comp_async_data;
	u32 reserved15[4];
	u32 gpio_interrupt_hdr;
	u32 gpio_interrupt_data;
	u32 reserved16[4];
} __packed;
122 
/* Values of sys_info_regs.cfg_running / img_running */
enum {
	SWITCHTEC_CFG0_RUNNING = 0x04,
	SWITCHTEC_CFG1_RUNNING = 0x05,
	SWITCHTEC_IMG0_RUNNING = 0x03,
	SWITCHTEC_IMG1_RUNNING = 0x07,
};
129 
/*
 * System information block at SWITCHTEC_GAS_SYS_INFO_OFFSET; the string
 * fields are fixed-width, space-padded (see io_string_show()).  Layout
 * mirrors hardware, hence __packed.
 */
struct sys_info_regs {
	u32 device_id;
	u32 device_version;
	u32 firmware_version;
	u32 reserved1;
	u32 vendor_table_revision;
	u32 table_format_version;
	u32 partition_id;
	u32 cfg_file_fmt_version;
	u16 cfg_running;
	u16 img_running;
	u32 reserved2[57];
	char vendor_id[8];
	char product_id[16];
	char product_revision[4];
	char component_vendor[8];
	u16 component_id;
	u8 component_revision;
} __packed;
149 
/*
 * Flash partition map at SWITCHTEC_GAS_FLASH_INFO_OFFSET.  The active_*
 * entries give the address of the currently-selected image/config; the
 * partition_info entries give address/length of each stored partition
 * (consumed by the flash ioctls).
 */
struct flash_info_regs {
	u32 flash_part_map_upd_idx;

	struct active_partition_info {
		u32 address;
		u32 build_version;
		u32 build_string;
	} active_img;

	struct active_partition_info active_cfg;
	struct active_partition_info inactive_img;
	struct active_partition_info inactive_cfg;

	u32 flash_length;

	struct partition_info {
		u32 address;
		u32 length;
	} cfg0;

	struct partition_info cfg1;
	struct partition_info img0;
	struct partition_info img1;
	struct partition_info nvlog;
	struct partition_info vendor[8];
};
176 
/*
 * NTB information block at SWITCHTEC_GAS_NTB_OFFSET; supplies the
 * partition count/id used throughout the driver.  Layout mirrors
 * hardware, hence __packed.
 */
struct ntb_info_regs {
	u8  partition_count;
	u8  partition_id;
	u16 reserved1;
	u64 ep_map;
	u16 requester_id;
} __packed;
184 
/*
 * Per-partition configuration/event block at SWITCHTEC_GAS_PART_CFG_OFFSET
 * (one entry per partition, indexed via mmio_part_cfg_all).  The *_hdr
 * registers follow the SWITCHTEC_EVENT_* bit layout.  __packed to mirror
 * hardware.
 */
struct part_cfg_regs {
	u32 status;
	u32 state;
	u32 port_cnt;
	u32 usp_port_mode;
	u32 usp_pff_inst_id;
	u32 vep_pff_inst_id;
	u32 dsp_pff_inst_id[47];
	u32 reserved1[11];
	u16 vep_vector_number;
	u16 usp_vector_number;
	u32 port_event_bitmap;
	u32 reserved2[3];
	u32 part_event_summary;
	u32 reserved3[3];
	u32 part_reset_hdr;
	u32 part_reset_data[5];
	u32 mrpc_comp_hdr;
	u32 mrpc_comp_data[5];
	u32 mrpc_comp_async_hdr;
	u32 mrpc_comp_async_data[5];
	u32 dyn_binding_hdr;
	u32 dyn_binding_data[5];
	u32 reserved4[159];
} __packed;
210 
/* Bit positions within part_cfg_regs.part_event_summary */
enum {
	SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0,
	SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1,
	SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2,
	SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3,
};
217 
/*
 * Per-PFF (PCI Function Framework) CSR block at
 * SWITCHTEC_GAS_PFF_CSR_OFFSET; valid entries are detected by matching
 * vendor_id against MICROSEMI_VENDOR_ID.  The *_hdr registers follow the
 * SWITCHTEC_EVENT_* bit layout.  __packed to mirror hardware.
 */
struct pff_csr_regs {
	u16 vendor_id;
	u16 device_id;
	u32 pci_cfg_header[15];
	u32 pci_cap_region[48];
	u32 pcie_cap_region[448];
	u32 indirect_gas_window[128];
	u32 indirect_gas_window_off;
	u32 reserved[127];
	u32 pff_event_summary;
	u32 reserved2[3];
	u32 aer_in_p2p_hdr;
	u32 aer_in_p2p_data[5];
	u32 aer_in_vep_hdr;
	u32 aer_in_vep_data[5];
	u32 dpc_hdr;
	u32 dpc_data[5];
	u32 cts_hdr;
	u32 cts_data[5];
	u32 reserved3[6];
	u32 hotplug_hdr;
	u32 hotplug_data[5];
	u32 ier_hdr;
	u32 ier_data[5];
	u32 threshold_hdr;
	u32 threshold_data[5];
	u32 power_mgmt_hdr;
	u32 power_mgmt_data[5];
	u32 tlp_throttling_hdr;
	u32 tlp_throttling_data[5];
	u32 force_speed_hdr;
	u32 force_speed_data[5];
	u32 credit_timeout_hdr;
	u32 credit_timeout_data[5];
	u32 link_state_hdr;
	u32 link_state_data[5];
	u32 reserved4[174];
} __packed;
256 
/* Per-device state; embedded struct device owns the lifetime. */
struct switchtec_dev {
	struct pci_dev *pdev;
	struct device dev;
	struct cdev cdev;

	int partition;			/* this function's partition id */
	int partition_count;		/* partitions reported by the device */
	int pff_csr_count;		/* number of valid pff_csr entries */
	char pff_local[SWITCHTEC_MAX_PFF_CSR];

	/* Mapped views into the GAS BAR (offsets per the enum above) */
	void __iomem *mmio;
	struct mrpc_regs __iomem *mmio_mrpc;
	struct sw_event_regs __iomem *mmio_sw_event;
	struct sys_info_regs __iomem *mmio_sys_info;
	struct flash_info_regs __iomem *mmio_flash_info;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct part_cfg_regs __iomem *mmio_part_cfg;
	struct part_cfg_regs __iomem *mmio_part_cfg_all;
	struct pff_csr_regs __iomem *mmio_pff_csr;

	/*
	 * The mrpc mutex must be held when accessing the other
	 * mrpc_ fields, alive flag and stuser->state field
	 */
	struct mutex mrpc_mutex;
	struct list_head mrpc_queue;	/* stusers waiting to execute */
	int mrpc_busy;			/* a command is on the hardware */
	struct work_struct mrpc_work;	/* completion bottom-half */
	struct delayed_work mrpc_timeout;	/* poll fallback, 500 ms */
	bool alive;

	wait_queue_head_t event_wq;
	atomic_t event_cnt;
};
291 
/* Map the embedded struct device back to its switchtec_dev. */
static struct switchtec_dev *to_stdev(struct device *dev)
{
	return container_of(dev, struct switchtec_dev, dev);
}
296 
/*
 * Lifecycle of a user's MRPC command:
 * IDLE -> QUEUED (write) -> RUNNING (on hardware) -> DONE -> IDLE (read).
 * Protected by stdev->mrpc_mutex.
 */
enum mrpc_state {
	MRPC_IDLE = 0,
	MRPC_QUEUED,
	MRPC_RUNNING,
	MRPC_DONE,
};
303 
/*
 * Per-open-file state; refcounted because the mrpc_queue holds a
 * reference while a command is in flight (see mrpc_queue_cmd()).
 */
struct switchtec_user {
	struct switchtec_dev *stdev;

	enum mrpc_state state;		/* guarded by stdev->mrpc_mutex */

	struct completion comp;		/* signalled when command completes */
	struct kref kref;
	struct list_head list;		/* entry on stdev->mrpc_queue */

	u32 cmd;			/* MRPC command code from userspace */
	u32 status;			/* last hardware status read */
	u32 return_code;		/* hardware ret_value on DONE */
	size_t data_len;		/* valid bytes of input payload */
	size_t read_len;		/* output bytes the reader asked for */
	unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
	int event_cnt;			/* snapshot of stdev->event_cnt for poll */
};
321 
/*
 * Allocate and initialize a per-open user context.  Takes a reference on
 * the device so it outlives unbinding; released in stuser_free().
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_completion(&stuser->comp);
	/* start with the current event count so poll() doesn't fire stale */
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}
341 
/* kref release callback: drop the device reference and free the context. */
static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}
353 
/* Drop a reference on a user context; frees it when the last goes away. */
static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}
358 
359 static void stuser_set_state(struct switchtec_user *stuser,
360 			     enum mrpc_state state)
361 {
362 	/* requires the mrpc_mutex to already be held when called */
363 
364 	const char * const state_names[] = {
365 		[MRPC_IDLE] = "IDLE",
366 		[MRPC_QUEUED] = "QUEUED",
367 		[MRPC_RUNNING] = "RUNNING",
368 		[MRPC_DONE] = "DONE",
369 	};
370 
371 	stuser->state = state;
372 
373 	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
374 		stuser, state_names[state]);
375 }
376 
static void mrpc_complete_cmd(struct switchtec_dev *stdev);

/*
 * Start the next queued MRPC command on the hardware, if it is idle.
 * Caller must hold stdev->mrpc_mutex.
 *
 * The input payload is copied into the GAS first; writing the cmd
 * register then kicks off execution.  Status is sampled once immediately
 * in case the command completed synchronously, and a 500 ms delayed work
 * is armed as a polling fallback for the completion interrupt.
 */
static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	/* fast path: the command may already be finished */
	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
		mrpc_complete_cmd(stdev);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}
407 
/*
 * Queue a user's command for execution and try to submit it immediately.
 * Caller must hold stdev->mrpc_mutex.
 *
 * Takes an extra reference on stuser which is owned by the queue and
 * dropped in mrpc_complete_cmd(); this keeps the context alive even if
 * the file is closed while the command is in flight.  Always returns 0.
 */
static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	/* default to the full payload; read() may shrink this later */
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	init_completion(&stuser->comp);
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}
424 
/*
 * Check whether the command at the head of the queue has finished and,
 * if so, harvest its results, wake the waiter, and submit the next one.
 * Caller must hold stdev->mrpc_mutex.
 */
static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */
	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	stuser->status = ioread32(&stdev->mmio_mrpc->status);
	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;	/* still running; the timeout work will re-check */

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	/* only fetch results for a clean completion */
	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE)
		goto out;

	stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
		      stuser->read_len);

out:
	complete_all(&stuser->comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);	/* drop the queue's reference */
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}
461 
/*
 * Bottom half for the MRPC completion event: cancel the polling fallback
 * and process the completed command under the mutex.
 */
static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}
475 
/*
 * Polling fallback in case the completion interrupt is missed: if the
 * command is still in progress, re-arm ourselves for another 500 ms;
 * otherwise complete it.
 */
static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);

out:
	mutex_unlock(&stdev->mrpc_mutex);
}
499 
/* sysfs: device_version register, printed in hex. */
static ssize_t device_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sprintf(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);
511 
/* sysfs: firmware_version register, printed as 8 hex digits. */
static ssize_t fw_version_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sprintf(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);
523 
524 static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
525 {
526 	int i;
527 
528 	memcpy_fromio(buf, attr, len);
529 	buf[len] = '\n';
530 	buf[len + 1] = 0;
531 
532 	for (i = len - 1; i > 0; i--) {
533 		if (buf[i] != ' ')
534 			break;
535 		buf[i] = '\n';
536 		buf[i + 1] = 0;
537 	}
538 
539 	return strlen(buf);
540 }
541 
/*
 * Generate a read-only sysfs attribute that exposes one of the
 * fixed-width string fields of sys_info_regs via io_string_show().
 */
#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	return io_string_show(buf, &stdev->mmio_sys_info->field, \
			    sizeof(stdev->mmio_sys_info->field)); \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);
DEVICE_ATTR_SYS_INFO_STR(component_vendor);
557 
/* sysfs: component_id register formatted as a "PMxxxx" part number. */
static ssize_t component_id_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->component_id);

	return sprintf(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);
567 
/* sysfs: component_revision register as a decimal number. */
static ssize_t component_revision_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->component_revision);

	return sprintf(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);
577 
/* sysfs: this function's partition number (cached at probe time). */
static ssize_t partition_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);
586 
/* sysfs: total number of partitions reported by the device. */
static ssize_t partition_count_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sprintf(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);
595 
/* All sysfs attributes exported on the switchtec device node. */
static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);
611 
/*
 * Char device open: allocate a per-open switchtec_user context and stash
 * it in private_data.  The device is non-seekable.
 */
static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	nonseekable_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}
630 
/*
 * Char device close: drop the open's reference.  If a command is still
 * in flight, the queue's own reference keeps the context alive.
 */
static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}
639 
640 static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
641 {
642 	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
643 		return -EINTR;
644 
645 	if (!stdev->alive) {
646 		mutex_unlock(&stdev->mrpc_mutex);
647 		return -ENODEV;
648 	}
649 
650 	return 0;
651 }
652 
/*
 * write(): submit an MRPC command.  The user buffer is a u32 command
 * code followed by up to SWITCHTEC_MRPC_PAYLOAD_SIZE bytes of input
 * payload.  Only one command may be outstanding per open file; a second
 * write before the matching read returns -EBADE.
 */
static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;	/* previous command not yet read back */
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}
698 
/*
 * read(): retrieve the result of a previously written MRPC command.
 * The returned buffer is the u32 hardware return code followed by up to
 * read_len bytes of output payload.  Blocks until completion unless
 * O_NONBLOCK is set (then -EAGAIN if not done yet).  Reading with no
 * command outstanding returns -EBADE.
 */
static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	/* limit how much output mrpc_complete_cmd() copies back */
	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!try_wait_for_completion(&stuser->comp))
			return -EAGAIN;
	} else {
		rc = wait_for_completion_interruptible(&stuser->comp);
		if (rc < 0)
			return rc;
	}

	/* re-take the lock; the device may have disappeared while waiting */
	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	/* command fully consumed; the file may submit another */
	stuser_set_state(stuser, MRPC_IDLE);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}
768 
/*
 * poll(): POLLIN when this open's MRPC command has completed, POLLPRI
 * when new device events have arrived since the last event summary read.
 * A dead device reports error/hangup on everything.
 */
static unsigned int switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int ret = 0;

	poll_wait(filp, &stuser->comp.wait, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return POLLIN | POLLRDHUP | POLLOUT | POLLERR | POLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	/* non-destructive here: mrpc_complete_cmd() used complete_all() */
	if (try_wait_for_completion(&stuser->comp))
		ret |= POLLIN | POLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= POLLPRI | POLLRDBAND;

	return ret;
}
791 
792 static int ioctl_flash_info(struct switchtec_dev *stdev,
793 			    struct switchtec_ioctl_flash_info __user *uinfo)
794 {
795 	struct switchtec_ioctl_flash_info info = {0};
796 	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
797 
798 	info.flash_length = ioread32(&fi->flash_length);
799 	info.num_partitions = SWITCHTEC_IOCTL_NUM_PARTITIONS;
800 
801 	if (copy_to_user(uinfo, &info, sizeof(info)))
802 		return -EFAULT;
803 
804 	return 0;
805 }
806 
/* Copy one flash partition's address/length out of device registers. */
static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}
813 
/*
 * SWITCHTEC_IOCTL_FLASH_PART_INFO: report address/length of the requested
 * flash partition, plus whether it is the ACTIVE one (matches the device's
 * active_img/active_cfg address) and/or the RUNNING one (matches the
 * img_running/cfg_running id in sys_info).
 */
static int ioctl_flash_part_info(struct switchtec_dev *stdev,
	struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	struct switchtec_ioctl_flash_part_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;
	u32 active_addr = -1;	/* sentinel: NVLOG/vendor have no "active" */

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	switch (info.flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg0);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(&info, &fi->cfg1);
		if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img0);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(&info, &fi->img1);
		if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING)
			info.active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(&info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(&info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(&info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(&info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(&info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(&info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(&info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(&info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(&info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info.address == active_addr)
		info.active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
889 
890 static int ioctl_event_summary(struct switchtec_dev *stdev,
891 	struct switchtec_user *stuser,
892 	struct switchtec_ioctl_event_summary __user *usum)
893 {
894 	struct switchtec_ioctl_event_summary s = {0};
895 	int i;
896 	u32 reg;
897 
898 	s.global = ioread32(&stdev->mmio_sw_event->global_summary);
899 	s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap);
900 	s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);
901 
902 	for (i = 0; i < stdev->partition_count; i++) {
903 		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
904 		s.part[i] = reg;
905 	}
906 
907 	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
908 		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
909 		if (reg != MICROSEMI_VENDOR_ID)
910 			break;
911 
912 		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
913 		s.pff[i] = reg;
914 	}
915 
916 	if (copy_to_user(usum, &s, sizeof(s)))
917 		return -EFAULT;
918 
919 	stuser->event_cnt = atomic_read(&stdev->event_cnt);
920 
921 	return 0;
922 }
923 
/* Event-header accessor for global events (index unused). */
static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}
929 
/* Event-header accessor for per-partition events; index = partition. */
static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}
935 
/* Event-header accessor for per-PFF events; index = PFF instance. */
static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
941 
/* Designated-initializer helpers for the event_regs[] table below */
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}
945 
946 const struct event_reg {
947 	size_t offset;
948 	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
949 				size_t offset, int index);
950 } event_regs[] = {
951 	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
952 	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
953 	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
954 	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
955 	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
956 	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
957 	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
958 	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
959 	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
960 	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
961 	       twi_mrpc_comp_async_hdr),
962 	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
963 	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
964 	       cli_mrpc_comp_async_hdr),
965 	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
966 	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
967 	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
968 	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
969 	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
970 	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
971 	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
972 	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
973 	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
974 	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
975 	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
976 	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
977 	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
978 	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
979 	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
980 	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
981 	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
982 };
983 
/*
 * Resolve an (event_id, index) pair from userspace to the __iomem
 * address of the event's header register, validating the id and the
 * index range for partition/PFF scoped events.  Returns ERR_PTR(-EINVAL)
 * on out-of-range input.
 */
static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}
1006 
/*
 * Read one event's header and data registers, apply the enable/clear
 * flags requested in ctl->flags, then rewrite ctl->flags to report the
 * resulting enable state.  Note ctl->flags is overwritten on return.
 */
static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	/* the data words immediately follow the header register */
	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	/*
	 * OCCURRED and CLEAR share bit 0: leaving the bit set in the
	 * write-back clears the event, so mask it out unless the caller
	 * asked for a clear.
	 */
	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	/* only touch the hardware if the caller requested a change */
	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}
1059 
1060 static int ioctl_event_ctl(struct switchtec_dev *stdev,
1061 	struct switchtec_ioctl_event_ctl __user *uctl)
1062 {
1063 	int ret;
1064 	int nr_idxs;
1065 	struct switchtec_ioctl_event_ctl ctl;
1066 
1067 	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
1068 		return -EFAULT;
1069 
1070 	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
1071 		return -EINVAL;
1072 
1073 	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
1074 		return -EINVAL;
1075 
1076 	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
1077 		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
1078 			nr_idxs = 1;
1079 		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
1080 			nr_idxs = stdev->partition_count;
1081 		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
1082 			nr_idxs = stdev->pff_csr_count;
1083 		else
1084 			return -EINVAL;
1085 
1086 		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
1087 			ret = event_ctl(stdev, &ctl);
1088 			if (ret < 0)
1089 				return ret;
1090 		}
1091 	} else {
1092 		ret = event_ctl(stdev, &ctl);
1093 		if (ret < 0)
1094 			return ret;
1095 	}
1096 
1097 	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
1098 		return -EFAULT;
1099 
1100 	return 0;
1101 }
1102 
1103 static int ioctl_pff_to_port(struct switchtec_dev *stdev,
1104 			     struct switchtec_ioctl_pff_port *up)
1105 {
1106 	int i, part;
1107 	u32 reg;
1108 	struct part_cfg_regs *pcfg;
1109 	struct switchtec_ioctl_pff_port p;
1110 
1111 	if (copy_from_user(&p, up, sizeof(p)))
1112 		return -EFAULT;
1113 
1114 	p.port = -1;
1115 	for (part = 0; part < stdev->partition_count; part++) {
1116 		pcfg = &stdev->mmio_part_cfg_all[part];
1117 		p.partition = part;
1118 
1119 		reg = ioread32(&pcfg->usp_pff_inst_id);
1120 		if (reg == p.pff) {
1121 			p.port = 0;
1122 			break;
1123 		}
1124 
1125 		reg = ioread32(&pcfg->vep_pff_inst_id);
1126 		if (reg == p.pff) {
1127 			p.port = SWITCHTEC_IOCTL_PFF_VEP;
1128 			break;
1129 		}
1130 
1131 		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
1132 			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
1133 			if (reg != p.pff)
1134 				continue;
1135 
1136 			p.port = i + 1;
1137 			break;
1138 		}
1139 
1140 		if (p.port != -1)
1141 			break;
1142 	}
1143 
1144 	if (copy_to_user(up, &p, sizeof(p)))
1145 		return -EFAULT;
1146 
1147 	return 0;
1148 }
1149 
1150 static int ioctl_port_to_pff(struct switchtec_dev *stdev,
1151 			     struct switchtec_ioctl_pff_port *up)
1152 {
1153 	struct switchtec_ioctl_pff_port p;
1154 	struct part_cfg_regs *pcfg;
1155 
1156 	if (copy_from_user(&p, up, sizeof(p)))
1157 		return -EFAULT;
1158 
1159 	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
1160 		pcfg = stdev->mmio_part_cfg;
1161 	else if (p.partition < stdev->partition_count)
1162 		pcfg = &stdev->mmio_part_cfg_all[p.partition];
1163 	else
1164 		return -EINVAL;
1165 
1166 	switch (p.port) {
1167 	case 0:
1168 		p.pff = ioread32(&pcfg->usp_pff_inst_id);
1169 		break;
1170 	case SWITCHTEC_IOCTL_PFF_VEP:
1171 		p.pff = ioread32(&pcfg->vep_pff_inst_id);
1172 		break;
1173 	default:
1174 		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
1175 			return -EINVAL;
1176 		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
1177 		break;
1178 	}
1179 
1180 	if (copy_to_user(up, &p, sizeof(p)))
1181 		return -EFAULT;
1182 
1183 	return 0;
1184 }
1185 
1186 static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
1187 				unsigned long arg)
1188 {
1189 	struct switchtec_user *stuser = filp->private_data;
1190 	struct switchtec_dev *stdev = stuser->stdev;
1191 	int rc;
1192 	void __user *argp = (void __user *)arg;
1193 
1194 	rc = lock_mutex_and_test_alive(stdev);
1195 	if (rc)
1196 		return rc;
1197 
1198 	switch (cmd) {
1199 	case SWITCHTEC_IOCTL_FLASH_INFO:
1200 		rc = ioctl_flash_info(stdev, argp);
1201 		break;
1202 	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
1203 		rc = ioctl_flash_part_info(stdev, argp);
1204 		break;
1205 	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
1206 		rc = ioctl_event_summary(stdev, stuser, argp);
1207 		break;
1208 	case SWITCHTEC_IOCTL_EVENT_CTL:
1209 		rc = ioctl_event_ctl(stdev, argp);
1210 		break;
1211 	case SWITCHTEC_IOCTL_PFF_TO_PORT:
1212 		rc = ioctl_pff_to_port(stdev, argp);
1213 		break;
1214 	case SWITCHTEC_IOCTL_PORT_TO_PFF:
1215 		rc = ioctl_port_to_pff(stdev, argp);
1216 		break;
1217 	default:
1218 		rc = -ENOTTY;
1219 		break;
1220 	}
1221 
1222 	mutex_unlock(&stdev->mrpc_mutex);
1223 	return rc;
1224 }
1225 
/*
 * Character device operations for /dev/switchtec%d.
 * NOTE(review): compat_ioctl reuses the native handler, which assumes
 * every ioctl structure has an identical layout for 32-bit userspace —
 * verify against the structures in switchtec_ioctl.h.
 */
static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = switchtec_dev_ioctl,
};
1236 
/*
 * Device-model release callback: invoked when the last reference to
 * the embedded struct device is dropped; frees the containing
 * switchtec_dev allocated in stdev_create().
 */
static void stdev_release(struct device *dev)
{
	kfree(to_stdev(dev));
}
1243 
/*
 * Shut down a device that is going away: disable bus mastering, stop
 * the MRPC timeout work, mark the device dead under the MRPC mutex,
 * fail all queued MRPC requests, and wake anyone sleeping in poll/read.
 */
static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	/* Ensure no MRPC timeout handler runs after this point */
	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		complete_all(&stuser->comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);	/* drop the queue's reference */
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}
1268 
/*
 * Allocate and initialize a switchtec_dev for @pdev: set up the MRPC
 * queue/work items, the event wait queue, the class device, a minor
 * number from the IDA, and the cdev.  The device is not registered
 * here; the caller makes it visible with cdev_device_add().
 *
 * Returns the new device, or an ERR_PTR() on failure.  On the error
 * path the half-initialized device is released via put_device(), which
 * ends up in stdev_release().
 */
static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	/* Allocate on the PCI device's NUMA node */
	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pdev;
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_simple_get(&switchtec_minor_ida, 0, 0,
			       GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}
1319 
/*
 * If event @eid instance @idx has occurred with its IRQ enabled, mask
 * its IRQ so it stops interrupting until userspace services it.
 * Returns 1 if an event was masked, 0 otherwise.
 */
static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	/* Nothing to do unless the event fired with its IRQ enabled */
	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	/*
	 * Bit 0 is write-1-to-clear (SWITCHTEC_EVENT_CLEAR), so clearing
	 * OCCURRED in the value written back leaves the event asserted in
	 * hardware for userspace to observe; only the IRQ enable changes.
	 */
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}
1338 
1339 static int mask_all_events(struct switchtec_dev *stdev, int eid)
1340 {
1341 	int idx;
1342 	int count = 0;
1343 
1344 	if (event_regs[eid].map_reg == part_ev_reg) {
1345 		for (idx = 0; idx < stdev->partition_count; idx++)
1346 			count += mask_event(stdev, eid, idx);
1347 	} else if (event_regs[eid].map_reg == pff_ev_reg) {
1348 		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
1349 			if (!stdev->pff_local[idx])
1350 				continue;
1351 			count += mask_event(stdev, eid, idx);
1352 		}
1353 	} else {
1354 		count += mask_event(stdev, eid, 0);
1355 	}
1356 
1357 	return count;
1358 }
1359 
/*
 * Event interrupt handler: acknowledges MRPC completion interrupts
 * (deferring the real work to mrpc_work) and masks all other active
 * event sources, waking pollers so userspace can service them.
 */
static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		/* Write the header back to ack the W1C OCCURRED bit */
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	/* Mask anything else that fired; userspace handles the events */
	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++)
		event_count += mask_all_events(stdev, eid);

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}
1388 
1389 static int switchtec_init_isr(struct switchtec_dev *stdev)
1390 {
1391 	int nvecs;
1392 	int event_irq;
1393 
1394 	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, 4,
1395 				      PCI_IRQ_MSIX | PCI_IRQ_MSI);
1396 	if (nvecs < 0)
1397 		return nvecs;
1398 
1399 	event_irq = ioread32(&stdev->mmio_part_cfg->vep_vector_number);
1400 	if (event_irq < 0 || event_irq >= nvecs)
1401 		return -EFAULT;
1402 
1403 	event_irq = pci_irq_vector(stdev->pdev, event_irq);
1404 	if (event_irq < 0)
1405 		return event_irq;
1406 
1407 	return devm_request_irq(&stdev->pdev->dev, event_irq,
1408 				switchtec_event_isr, 0,
1409 				KBUILD_MODNAME, stdev);
1410 }
1411 
/*
 * Count the populated PFF CSR blocks (an entry is valid while its
 * vendor ID reads back as Microsemi's) and mark which PFF instance
 * IDs belong to the local partition, so the ISR only masks local
 * port events.
 */
static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs *pcfg = stdev->mmio_part_cfg;

	/* The PFF array ends at the first entry without our vendor ID */
	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != MICROSEMI_VENDOR_ID)
			break;
	}

	stdev->pff_csr_count = i;

	/* Out-of-range IDs are treated as "not present" and skipped */
	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id);
	if (reg < SWITCHTEC_MAX_PFF_CSR)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < SWITCHTEC_MAX_PFF_CSR)
			stdev->pff_local[reg] = 1;
	}
}
1440 
/*
 * Enable the PCI device, map BAR 0 (the GAS register space) and carve
 * it into the per-block mmio_* pointers, then read the partition
 * topology and initialize the PFF bookkeeping.
 *
 * All PCI resources are devm/pcim managed, so there is no explicit
 * teardown path here.
 */
static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Map only BAR 0: the whole register space lives there */
	rc = pcim_iomap_regions(pdev, 0x1, KBUILD_MODNAME);
	if (rc)
		return rc;

	pci_set_master(pdev);

	stdev->mmio = pcim_iomap_table(pdev)[0];
	stdev->mmio_mrpc = stdev->mmio + SWITCHTEC_GAS_MRPC_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;
	stdev->partition = ioread8(&stdev->mmio_sys_info->partition_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	/* Guard against hardware reporting zero partitions */
	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	return 0;
}
1477 
/*
 * Probe callback: create the switchtec device, bring up PCI resources
 * and the event interrupt, enable MRPC completion interrupts, and
 * finally make the char device visible to userspace.
 */
static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_put;
	}

	/* Ack any stale completion and enable the MRPC completion IRQ */
	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);

	/* Userspace can open the device from this point on */
	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_put:
	/* Minor was reserved in stdev_create(); release it before the ref */
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}
1517 
/*
 * Remove callback: unregister the char device first so no new users
 * can appear, then kill outstanding users and drop the final device
 * reference (the struct is freed in stdev_release()).
 */
static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_simple_remove(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");

	stdev_kill(stdev);
	put_device(&stdev->dev);
}
1531 
/*
 * Each device ID expands to two table entries: one matching the
 * management endpoint class code and one matching the NTB class code,
 * so the driver binds to the device in either personality.
 */
#define SWITCHTEC_PCI_DEVICE(device_id) \
	{ \
		.vendor     = MICROSEMI_VENDOR_ID, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = MICROSEMI_MGMT_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}, \
	{ \
		.vendor     = MICROSEMI_VENDOR_ID, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = MICROSEMI_NTB_CLASSCODE, \
		.class_mask = 0xFFFFFFFF, \
	}
1549 
static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531),  /* PFX 24xG3 */
	SWITCHTEC_PCI_DEVICE(0x8532),  /* PFX 32xG3 */
	SWITCHTEC_PCI_DEVICE(0x8533),  /* PFX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8534),  /* PFX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8535),  /* PFX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8536),  /* PFX 96xG3 */
	SWITCHTEC_PCI_DEVICE(0x8543),  /* PSX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8544),  /* PSX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8545),  /* PSX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8546),  /* PSX 96xG3 */
	SWITCHTEC_PCI_DEVICE(0x8551),  /* PAX 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8552),  /* PAX 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8553),  /* PAX 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8554),  /* PAX 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8555),  /* PAX 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8556),  /* PAX 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x8561),  /* PFXL 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8562),  /* PFXL 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8563),  /* PFXL 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8564),  /* PFXL 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8565),  /* PFXL 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8566),  /* PFXL 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x8571),  /* PFXI 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8572),  /* PFXI 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8573),  /* PFXI 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8574),  /* PFXI 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8575),  /* PFXI 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8576),  /* PFXI 96XG3 */
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
1582 
/* PCI driver glue; registered from switchtec_init() */
static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};
1589 
/*
 * Module init: reserve a char-dev major with max_devices minors,
 * create the device class, then register the PCI driver.  Unwinds in
 * reverse order on failure.
 */
static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	switchtec_class = class_create(THIS_MODULE, "switchtec");
	if (IS_ERR(switchtec_class)) {
		rc = PTR_ERR(switchtec_class);
		goto err_create_class;
	}

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_destroy(switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);
1622 
/*
 * Module exit: tear down in the reverse order of switchtec_init();
 * the driver must be unregistered (removing all devices) before the
 * class, region, and minor IDA are destroyed.
 */
static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_destroy(switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);
1633