xref: /openbmc/linux/drivers/platform/mellanox/mlxreg-hotplug.c (revision 45cc842d5b75ba8f9a958f2dd12b95c6dd0452bd)
/*
 * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2016-2018 Vadim Pasternak <vadimp@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF	1
#define MLXREG_HOTPLUG_MASK_OFF		2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF	1

/* ASIC health parameters. */
#define MLXREG_HOTPLUG_HEALTH_MASK	0x02
#define MLXREG_HOTPLUG_RST_CNTR		3

#define MLXREG_HOTPLUG_ATTRS_MAX	24

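/*
 * Register layout assumed by the offsets above: for each group the status
 * register sits at the group base register, the latched event register at
 * base + MLXREG_HOTPLUG_EVENT_OFF and the mask register at
 * base + MLXREG_HOTPLUG_MASK_OFF; the aggregation mask register follows the
 * aggregation status register by MLXREG_HOTPLUG_AGGR_MASK_OFF.
 */
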
/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template for interrupt handling;
 * @dwork: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute group for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating that probe has completed;
 */
struct mlxreg_hotplug_priv_data {
	int irq;
	struct device *dev;
	struct platform_device *pdev;
	struct mlxreg_hotplug_platform_data *plat;
	struct regmap *regmap;
	struct delayed_work dwork_irq;
	struct delayed_work dwork;
	spinlock_t lock; /* sync with interrupt */
	struct device *hwmon;
	struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
	struct sensor_device_attribute_2
			mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
	struct attribute_group group;
	const struct attribute_group *groups[2];
	u32 cell;
	u32 mask;
	u32 aggr_cache;
	bool after_probe;
};

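/*
 * Attach the I2C device representing a hot-plugged unit: take a reference
 * on the adapter given by data->hpdev.nr and instantiate the client
 * described by data->hpdev.brdinfo on it.
 */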
static int mlxreg_hotplug_device_create(struct device *dev,
					struct mlxreg_core_data *data)
{
	/*
	 * Return if adapter number is negative. It can happen in case the
	 * hotplug event is not associated with a hotplug device.
	 */
	if (data->hpdev.nr < 0)
		return 0;

	data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
	if (!data->hpdev.adapter) {
		dev_err(dev, "Failed to get adapter for bus %d\n",
			data->hpdev.nr);
		return -EFAULT;
	}

	data->hpdev.client = i2c_new_device(data->hpdev.adapter,
					    data->hpdev.brdinfo);
	if (!data->hpdev.client) {
		dev_err(dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
			data->hpdev.brdinfo->type, data->hpdev.nr,
			data->hpdev.brdinfo->addr);

		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
		return -EFAULT;
	}

	return 0;
}

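/*
 * Undo mlxreg_hotplug_device_create(): unregister the I2C client, if any,
 * and drop the reference taken on the I2C adapter.
 */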
static void mlxreg_hotplug_device_destroy(struct mlxreg_core_data *data)
{
	if (data->hpdev.client) {
		i2c_unregister_device(data->hpdev.client);
		data->hpdev.client = NULL;
	}

	if (data->hpdev.adapter) {
		i2c_put_adapter(data->hpdev.adapter);
		data->hpdev.adapter = NULL;
	}
}

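/*
 * sysfs/hwmon show callback: read the status register behind the attribute
 * and report either the raw health bits (for health items) or a 0/1
 * presence value, taking the item->inversed polarity into account.
 */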
static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
	struct mlxreg_core_hotplug_platform_data *pdata;
	int index = to_sensor_dev_attr_2(attr)->index;
	int nr = to_sensor_dev_attr_2(attr)->nr;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items + nr;
	data = item->data + index;

	ret = regmap_read(priv->regmap, data->reg, &regval);
	if (ret)
		return ret;

	if (item->health) {
		regval &= data->mask;
	} else {
		/* Bit = 0 : functional if item->inversed is true. */
		if (item->inversed)
			regval = !(regval & data->mask);
		else
			regval = !!(regval & data->mask);
	}

	return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int num_attrs = 0, id = 0, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Go over all kinds of items - psu, pwr, fan. */
	for (i = 0; i < pdata->counter; i++, item++) {
		num_attrs += item->count;
		data = item->data;
		/* Go over all units within the item. */
		for (j = 0; j < item->count; j++, data++, id++) {
			PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
			PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
							     GFP_KERNEL, "%s",
							     data->label);

			if (!PRIV_ATTR(id)->name) {
				dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
					id);
				return -ENOMEM;
			}

			PRIV_DEV_ATTR(id).dev_attr.attr.name =
							PRIV_ATTR(id)->name;
			PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444;
			PRIV_DEV_ATTR(id).dev_attr.show =
						mlxreg_hotplug_attr_show;
			PRIV_DEV_ATTR(id).nr = i;
			PRIV_DEV_ATTR(id).index = j;
			sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr);
		}
	}

	priv->group.attrs = devm_kzalloc(&priv->pdev->dev, num_attrs *
					 sizeof(struct attribute *),
					 GFP_KERNEL);
	if (!priv->group.attrs)
		return -ENOMEM;

	priv->group.attrs = priv->mlxreg_hotplug_attr;
	priv->groups[0] = &priv->group;
	priv->groups[1] = NULL;

	return 0;
}

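/*
 * Handle a presence/topology change for one item group: mask the group
 * event, read the status register, create or destroy the hotplug devices
 * whose bits toggled (honoring the item->inversed polarity), then
 * acknowledge and unmask the group event.
 */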
static void
mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
			   struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data;
	u32 asserted, regval, bit;
	int ret;

	/*
	 * Validate if the item related to the received signal type is valid.
	 * It should never happen, except in the situation when some piece of
	 * hardware is broken. In such a situation just produce an error
	 * message and return. The caller must continue to handle the signals
	 * from other devices if any.
	 */
	if (unlikely(!item)) {
		dev_err(priv->dev, "False signal is received\n");
		return;
	}

	/* Mask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   0);
	if (ret)
		goto out;

	/* Read status. */
	ret = regmap_read(priv->regmap, item->reg, &regval);
	if (ret)
		goto out;

	/* Set asserted bits and save last status. */
	regval &= item->mask;
	asserted = item->cache ^ regval;
	item->cache = regval;

	for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
		data = item->data + bit;
		if (regval & BIT(bit)) {
			if (item->inversed)
				mlxreg_hotplug_device_destroy(data);
			else
				mlxreg_hotplug_device_create(priv->dev, data);
		} else {
			if (item->inversed)
				mlxreg_hotplug_device_create(priv->dev, data);
			else
				mlxreg_hotplug_device_destroy(data);
		}
	}

	/* Acknowledge event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF,
			   0);
	if (ret)
		goto out;

	/* Unmask event. */
	ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF,
			   item->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

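/*
 * Handle ASIC health events: the ASIC device is attached immediately while
 * the driver is still probing (priv->after_probe is not yet set), otherwise
 * only after the health register has repeatedly read back the good value
 * MLXREG_HOTPLUG_HEALTH_MASK (data->health_cntr must reach
 * MLXREG_HOTPLUG_RST_CNTR); it is detached again as soon as the good value
 * is lost.
 */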
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;
		item->cache = regval;
		if (regval == MLXREG_HOTPLUG_HEALTH_MASK) {
			if ((data->health_cntr++ == MLXREG_HOTPLUG_RST_CNTR) ||
			    !priv->after_probe) {
				mlxreg_hotplug_device_create(priv->dev, data);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				mlxreg_hotplug_device_destroy(data);
				data->attached = false;
				data->health_cntr = 0;
			}
		}

		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

/*
 * mlxreg_hotplug_work_handler - performs traversing of device interrupt
 * registers according to the below hierarchy schema:
 *
 *				Aggregation registers (status/mask)
 * PSU registers:		*---*
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * Power registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * FAN registers:		|   |--> CPU
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 * ASIC registers:		|   |
 * *-----------------*		|   |
 * |status/event/mask|----->    | * |
 * *-----------------*		|   |
 *				*---*
 *
 * In case some system changes are detected: FAN in/out, PSU in/out, power
 * cable attached/detached, ASIC health good/bad, the relevant device is
 * created or destroyed.
 */
static void mlxreg_hotplug_work_handler(struct work_struct *work)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct mlxreg_core_item *item;
	u32 regval, aggr_asserted;
	unsigned long flags;
	int i, ret;

	priv = container_of(work, struct mlxreg_hotplug_priv_data,
			    dwork_irq.work);
	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	/* Mask aggregation event. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);
	if (ret < 0)
		goto out;

	/* Read aggregation status. */
	ret = regmap_read(priv->regmap, pdata->cell, &regval);
	if (ret)
		goto out;

	regval &= pdata->mask;
	aggr_asserted = priv->aggr_cache ^ regval;
	priv->aggr_cache = regval;

	/* Handle topology and health configuration changes. */
	for (i = 0; i < pdata->counter; i++, item++) {
		if (aggr_asserted & item->aggr_mask) {
			if (item->health)
				mlxreg_hotplug_health_work_helper(priv, item);
			else
				mlxreg_hotplug_work_helper(priv, item);
		}
	}

	if (aggr_asserted) {
		spin_lock_irqsave(&priv->lock, flags);

		/*
		 * It is possible that new signals have been asserted while
		 * the interrupt was masked by mlxreg_hotplug_work_handler.
		 * In this case such signals would be missed. In order to
		 * handle them the delayed work is canceled and the work task
		 * is re-scheduled for immediate execution. On the next pass
		 * the handler either picks up the missed signals or just
		 * validates that no new signals have been received during
		 * masking.
		 */
		cancel_delayed_work(&priv->dwork_irq);
		schedule_delayed_work(&priv->dwork_irq, 0);

		spin_unlock_irqrestore(&priv->lock, flags);

		return;
	}

	/* Unmask aggregation event (no need to acknowledge). */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}

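/*
 * Initial interrupt setup: clear any pending group events, prime the group
 * caches and masks, unmask the aggregation register(s) and run the work
 * handler once to synchronize the device topology with the current hardware
 * state before the interrupt line is enabled.
 */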
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	int i, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		/* Clear group presence event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke the work handler to initialize the hotplug devices setup. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

 out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	enable_irq(priv->irq);
	return ret;
}

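/*
 * Teardown counterpart of mlxreg_hotplug_set_irq(): disable the interrupt
 * line, cancel the pending work, mask the aggregation and group events and
 * destroy all devices that were attached for the hotplug groups.
 */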
static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/* Mask group presence event. */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presence event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(data);
	}
}

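/*
 * Hard interrupt handler: only kick the delayed work so that all register
 * access is performed from process context by the work handler.
 */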
static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
	struct mlxreg_hotplug_priv_data *priv;

	priv = (struct mlxreg_hotplug_priv_data *)dev;

	/* Schedule work task for immediate execution. */
	schedule_delayed_work(&priv->dwork_irq, 0);

	return IRQ_HANDLED;
}

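/*
 * Probe flow: obtain the interrupt (either from the platform data or from
 * the platform device resources), request it and keep it disabled until the
 * initial interrupt and topology setup is done, build the sysfs attributes
 * and finally register the hwmon device exposing them.
 */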
static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to get platform data.\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (pdata->irq) {
		priv->irq = pdata->irq;
	} else {
		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0) {
			dev_err(&pdev->dev, "Failed to get platform irq: %d\n",
				priv->irq);
			return priv->irq;
		}
	}

	priv->regmap = pdata->regmap;
	priv->dev = pdev->dev.parent;
	priv->pdev = pdev;

	err = devm_request_irq(&pdev->dev, priv->irq,
			       mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
			       | IRQF_SHARED, "mlxreg-hotplug", priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
		return err;
	}

	disable_irq(priv->irq);
	spin_lock_init(&priv->lock);
	INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
	/* Perform initial interrupts setup. */
	mlxreg_hotplug_set_irq(priv);

	priv->after_probe = true;
	dev_set_drvdata(&pdev->dev, priv);

	err = mlxreg_hotplug_attr_init(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
			err);
		return err;
	}

	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
					"mlxreg_hotplug", priv, priv->groups);
	if (IS_ERR(priv->hwmon)) {
		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
			PTR_ERR(priv->hwmon));
		return PTR_ERR(priv->hwmon);
	}

	return 0;
}

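/*
 * Only the interrupt and topology teardown is explicit here; the remaining
 * resources (memory, IRQ, hwmon device) are device-managed and released
 * automatically.
 */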
static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean interrupts setup. */
	mlxreg_hotplug_unset_irq(priv);

	return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");