1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Mellanox hotplug driver 4 * 5 * Copyright (C) 2016-2020 Mellanox Technologies 6 */ 7 8 #include <linux/bitops.h> 9 #include <linux/device.h> 10 #include <linux/hwmon.h> 11 #include <linux/hwmon-sysfs.h> 12 #include <linux/i2c.h> 13 #include <linux/interrupt.h> 14 #include <linux/module.h> 15 #include <linux/of_device.h> 16 #include <linux/platform_data/mlxreg.h> 17 #include <linux/platform_device.h> 18 #include <linux/spinlock.h> 19 #include <linux/string_helpers.h> 20 #include <linux/regmap.h> 21 #include <linux/workqueue.h> 22 23 /* Offset of event and mask registers from status register. */ 24 #define MLXREG_HOTPLUG_EVENT_OFF 1 25 #define MLXREG_HOTPLUG_MASK_OFF 2 26 #define MLXREG_HOTPLUG_AGGR_MASK_OFF 1 27 28 /* ASIC good health mask. */ 29 #define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02 30 31 #define MLXREG_HOTPLUG_ATTRS_MAX 128 32 #define MLXREG_HOTPLUG_NOT_ASSERT 3 33 34 /** 35 * struct mlxreg_hotplug_priv_data - platform private data: 36 * @irq: platform device interrupt number; 37 * @dev: basic device; 38 * @pdev: platform device; 39 * @plat: platform data; 40 * @regmap: register map handle; 41 * @dwork_irq: delayed work template; 42 * @lock: spin lock; 43 * @hwmon: hwmon device; 44 * @mlxreg_hotplug_attr: sysfs attributes array; 45 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array; 46 * @group: sysfs attribute group; 47 * @groups: list of sysfs attribute group for hwmon registration; 48 * @cell: location of top aggregation interrupt register; 49 * @mask: top aggregation interrupt common mask; 50 * @aggr_cache: last value of aggregation register status; 51 * @after_probe: flag indication probing completion; 52 * @not_asserted: number of entries in workqueue with no signal assertion; 53 */ 54 struct mlxreg_hotplug_priv_data { 55 int irq; 56 struct device *dev; 57 struct platform_device *pdev; 58 struct mlxreg_hotplug_platform_data *plat; 59 struct regmap *regmap; 60 struct delayed_work dwork_irq; 61 spinlock_t lock; /* sync with interrupt */ 62 struct device *hwmon; 63 struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1]; 64 struct sensor_device_attribute_2 65 mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX]; 66 struct attribute_group group; 67 const struct attribute_group *groups[2]; 68 u32 cell; 69 u32 mask; 70 u32 aggr_cache; 71 bool after_probe; 72 u8 not_asserted; 73 }; 74 75 /* Environment variables array for udev. */ 76 static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL }; 77 78 static int 79 mlxreg_hotplug_udev_event_send(struct kobject *kobj, 80 struct mlxreg_core_data *data, bool action) 81 { 82 char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2]; 83 char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 }; 84 85 mlxreg_hotplug_udev_envp[0] = event_str; 86 string_upper(label, data->label); 87 snprintf(event_str, MLXREG_CORE_LABEL_MAX_SIZE, "%s=%d", label, !!action); 88 89 return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp); 90 } 91 92 static void 93 mlxreg_hotplug_pdata_export(void *pdata, void *regmap) 94 { 95 struct mlxreg_core_hotplug_platform_data *dev_pdata = pdata; 96 97 /* Export regmap to underlying device. */ 98 dev_pdata->regmap = regmap; 99 } 100 101 static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv, 102 struct mlxreg_core_data *data, 103 enum mlxreg_hotplug_kind kind) 104 { 105 struct i2c_board_info *brdinfo = data->hpdev.brdinfo; 106 struct mlxreg_core_hotplug_platform_data *pdata; 107 struct i2c_client *client; 108 109 /* Notify user by sending hwmon uevent. */ 110 mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true); 111 112 /* 113 * Return if adapter number is negative. It could be in case hotplug 114 * event is not associated with hotplug device. 115 */ 116 if (data->hpdev.nr < 0) 117 return 0; 118 119 pdata = dev_get_platdata(&priv->pdev->dev); 120 switch (data->hpdev.action) { 121 case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: 122 data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr + 123 pdata->shift_nr); 124 if (!data->hpdev.adapter) { 125 dev_err(priv->dev, "Failed to get adapter for bus %d\n", 126 data->hpdev.nr + pdata->shift_nr); 127 return -EFAULT; 128 } 129 130 /* Export platform data to underlying device. */ 131 if (brdinfo->platform_data) 132 mlxreg_hotplug_pdata_export(brdinfo->platform_data, pdata->regmap); 133 134 client = i2c_new_client_device(data->hpdev.adapter, 135 brdinfo); 136 if (IS_ERR(client)) { 137 dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n", 138 brdinfo->type, data->hpdev.nr + 139 pdata->shift_nr, brdinfo->addr); 140 141 i2c_put_adapter(data->hpdev.adapter); 142 data->hpdev.adapter = NULL; 143 return PTR_ERR(client); 144 } 145 146 data->hpdev.client = client; 147 break; 148 case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: 149 /* Export platform data to underlying device. */ 150 if (data->hpdev.brdinfo && data->hpdev.brdinfo->platform_data) 151 mlxreg_hotplug_pdata_export(data->hpdev.brdinfo->platform_data, 152 pdata->regmap); 153 /* Pass parent hotplug device handle to underlying device. */ 154 data->notifier = data->hpdev.notifier; 155 data->hpdev.pdev = platform_device_register_resndata(&priv->pdev->dev, 156 brdinfo->type, 157 data->hpdev.nr, 158 NULL, 0, data, 159 sizeof(*data)); 160 if (IS_ERR(data->hpdev.pdev)) 161 return PTR_ERR(data->hpdev.pdev); 162 163 break; 164 default: 165 break; 166 } 167 168 if (data->hpdev.notifier && data->hpdev.notifier->user_handler) 169 return data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 1); 170 171 return 0; 172 } 173 174 static void 175 mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv, 176 struct mlxreg_core_data *data, 177 enum mlxreg_hotplug_kind kind) 178 { 179 /* Notify user by sending hwmon uevent. */ 180 mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false); 181 if (data->hpdev.notifier && data->hpdev.notifier->user_handler) 182 data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 0); 183 184 switch (data->hpdev.action) { 185 case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION: 186 if (data->hpdev.client) { 187 i2c_unregister_device(data->hpdev.client); 188 data->hpdev.client = NULL; 189 } 190 191 if (data->hpdev.adapter) { 192 i2c_put_adapter(data->hpdev.adapter); 193 data->hpdev.adapter = NULL; 194 } 195 break; 196 case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION: 197 if (data->hpdev.pdev) 198 platform_device_unregister(data->hpdev.pdev); 199 break; 200 default: 201 break; 202 } 203 } 204 205 static ssize_t mlxreg_hotplug_attr_show(struct device *dev, 206 struct device_attribute *attr, 207 char *buf) 208 { 209 struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev); 210 struct mlxreg_core_hotplug_platform_data *pdata; 211 int index = to_sensor_dev_attr_2(attr)->index; 212 int nr = to_sensor_dev_attr_2(attr)->nr; 213 struct mlxreg_core_item *item; 214 struct mlxreg_core_data *data; 215 u32 regval; 216 int ret; 217 218 pdata = dev_get_platdata(&priv->pdev->dev); 219 item = pdata->items + nr; 220 data = item->data + index; 221 222 ret = regmap_read(priv->regmap, data->reg, ®val); 223 if (ret) 224 return ret; 225 226 if (item->health) { 227 regval &= data->mask; 228 } else { 229 /* Bit = 0 : functional if item->inversed is true. */ 230 if (item->inversed) 231 regval = !(regval & data->mask); 232 else 233 regval = !!(regval & data->mask); 234 } 235 236 return sprintf(buf, "%u\n", regval); 237 } 238 239 #define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i] 240 #define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i] 241 242 static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv) 243 { 244 struct mlxreg_core_hotplug_platform_data *pdata; 245 struct mlxreg_core_item *item; 246 struct mlxreg_core_data *data; 247 unsigned long mask; 248 u32 regval; 249 int num_attrs = 0, id = 0, i, j, k, ret; 250 251 pdata = dev_get_platdata(&priv->pdev->dev); 252 item = pdata->items; 253 254 /* Go over all kinds of items - psu, pwr, fan. */ 255 for (i = 0; i < pdata->counter; i++, item++) { 256 if (item->capability) { 257 /* 258 * Read group capability register to get actual number 259 * of interrupt capable components and set group mask 260 * accordingly. 261 */ 262 ret = regmap_read(priv->regmap, item->capability, 263 ®val); 264 if (ret) 265 return ret; 266 267 item->mask = GENMASK((regval & item->mask) - 1, 0); 268 } 269 270 data = item->data; 271 272 /* Go over all unmasked units within item. */ 273 mask = item->mask; 274 k = 0; 275 for_each_set_bit(j, &mask, item->count) { 276 if (data->capability) { 277 /* 278 * Read capability register and skip non 279 * relevant attributes. 280 */ 281 ret = regmap_read(priv->regmap, 282 data->capability, ®val); 283 if (ret) 284 return ret; 285 if (!(regval & data->bit)) { 286 data++; 287 continue; 288 } 289 } 290 PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr; 291 PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev, 292 GFP_KERNEL, 293 data->label); 294 295 if (!PRIV_ATTR(id)->name) { 296 dev_err(priv->dev, "Memory allocation failed for attr %d.\n", 297 id); 298 return -ENOMEM; 299 } 300 301 PRIV_DEV_ATTR(id).dev_attr.attr.name = 302 PRIV_ATTR(id)->name; 303 PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444; 304 PRIV_DEV_ATTR(id).dev_attr.show = 305 mlxreg_hotplug_attr_show; 306 PRIV_DEV_ATTR(id).nr = i; 307 PRIV_DEV_ATTR(id).index = k; 308 sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr); 309 data++; 310 id++; 311 k++; 312 } 313 num_attrs += k; 314 } 315 316 priv->group.attrs = devm_kcalloc(&priv->pdev->dev, 317 num_attrs, 318 sizeof(struct attribute *), 319 GFP_KERNEL); 320 if (!priv->group.attrs) 321 return -ENOMEM; 322 323 priv->group.attrs = priv->mlxreg_hotplug_attr; 324 priv->groups[0] = &priv->group; 325 priv->groups[1] = NULL; 326 327 return 0; 328 } 329 330 static void 331 mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv, 332 struct mlxreg_core_item *item) 333 { 334 struct mlxreg_core_data *data; 335 unsigned long asserted; 336 u32 regval, bit; 337 int ret; 338 339 /* 340 * Validate if item related to received signal type is valid. 341 * It should never happen, excepted the situation when some 342 * piece of hardware is broken. In such situation just produce 343 * error message and return. Caller must continue to handle the 344 * signals from other devices if any. 345 */ 346 if (unlikely(!item)) { 347 dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n", 348 item->reg, item->mask); 349 350 return; 351 } 352 353 /* Mask event. */ 354 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF, 355 0); 356 if (ret) 357 goto out; 358 359 /* Read status. */ 360 ret = regmap_read(priv->regmap, item->reg, ®val); 361 if (ret) 362 goto out; 363 364 /* Set asserted bits and save last status. */ 365 regval &= item->mask; 366 asserted = item->cache ^ regval; 367 item->cache = regval; 368 369 for_each_set_bit(bit, &asserted, 8) { 370 data = item->data + bit; 371 if (regval & BIT(bit)) { 372 if (item->inversed) 373 mlxreg_hotplug_device_destroy(priv, data, item->kind); 374 else 375 mlxreg_hotplug_device_create(priv, data, item->kind); 376 } else { 377 if (item->inversed) 378 mlxreg_hotplug_device_create(priv, data, item->kind); 379 else 380 mlxreg_hotplug_device_destroy(priv, data, item->kind); 381 } 382 } 383 384 /* Acknowledge event. */ 385 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF, 386 0); 387 if (ret) 388 goto out; 389 390 /* Unmask event. */ 391 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF, 392 item->mask); 393 394 out: 395 if (ret) 396 dev_err(priv->dev, "Failed to complete workqueue.\n"); 397 } 398 399 static void 400 mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv, 401 struct mlxreg_core_item *item) 402 { 403 struct mlxreg_core_data *data = item->data; 404 u32 regval; 405 int i, ret = 0; 406 407 for (i = 0; i < item->count; i++, data++) { 408 /* Mask event. */ 409 ret = regmap_write(priv->regmap, data->reg + 410 MLXREG_HOTPLUG_MASK_OFF, 0); 411 if (ret) 412 goto out; 413 414 /* Read status. */ 415 ret = regmap_read(priv->regmap, data->reg, ®val); 416 if (ret) 417 goto out; 418 419 regval &= data->mask; 420 421 if (item->cache == regval) 422 goto ack_event; 423 424 /* 425 * ASIC health indication is provided through two bits. Bits 426 * value 0x2 indicates that ASIC reached the good health, value 427 * 0x0 indicates ASIC the bad health or dormant state and value 428 * 0x3 indicates the booting state. During ASIC reset it should 429 * pass the following states: dormant -> booting -> good. 430 */ 431 if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) { 432 if (!data->attached) { 433 /* 434 * ASIC is in steady state. Connect associated 435 * device, if configured. 436 */ 437 mlxreg_hotplug_device_create(priv, data, item->kind); 438 data->attached = true; 439 } 440 } else { 441 if (data->attached) { 442 /* 443 * ASIC health is failed after ASIC has been 444 * in steady state. Disconnect associated 445 * device, if it has been connected. 446 */ 447 mlxreg_hotplug_device_destroy(priv, data, item->kind); 448 data->attached = false; 449 data->health_cntr = 0; 450 } 451 } 452 item->cache = regval; 453 ack_event: 454 /* Acknowledge event. */ 455 ret = regmap_write(priv->regmap, data->reg + 456 MLXREG_HOTPLUG_EVENT_OFF, 0); 457 if (ret) 458 goto out; 459 460 /* Unmask event. */ 461 ret = regmap_write(priv->regmap, data->reg + 462 MLXREG_HOTPLUG_MASK_OFF, data->mask); 463 if (ret) 464 goto out; 465 } 466 467 out: 468 if (ret) 469 dev_err(priv->dev, "Failed to complete workqueue.\n"); 470 } 471 472 /* 473 * mlxreg_hotplug_work_handler - performs traversing of device interrupt 474 * registers according to the below hierarchy schema: 475 * 476 * Aggregation registers (status/mask) 477 * PSU registers: *---* 478 * *-----------------* | | 479 * |status/event/mask|-----> | * | 480 * *-----------------* | | 481 * Power registers: | | 482 * *-----------------* | | 483 * |status/event/mask|-----> | * | 484 * *-----------------* | | 485 * FAN registers: | |--> CPU 486 * *-----------------* | | 487 * |status/event/mask|-----> | * | 488 * *-----------------* | | 489 * ASIC registers: | | 490 * *-----------------* | | 491 * |status/event/mask|-----> | * | 492 * *-----------------* | | 493 * *---* 494 * 495 * In case some system changed are detected: FAN in/out, PSU in/out, power 496 * cable attached/detached, ASIC health good/bad, relevant device is created 497 * or destroyed. 498 */ 499 static void mlxreg_hotplug_work_handler(struct work_struct *work) 500 { 501 struct mlxreg_core_hotplug_platform_data *pdata; 502 struct mlxreg_hotplug_priv_data *priv; 503 struct mlxreg_core_item *item; 504 u32 regval, aggr_asserted; 505 unsigned long flags; 506 int i, ret; 507 508 priv = container_of(work, struct mlxreg_hotplug_priv_data, 509 dwork_irq.work); 510 pdata = dev_get_platdata(&priv->pdev->dev); 511 item = pdata->items; 512 513 /* Mask aggregation event. */ 514 ret = regmap_write(priv->regmap, pdata->cell + 515 MLXREG_HOTPLUG_AGGR_MASK_OFF, 0); 516 if (ret < 0) 517 goto out; 518 519 /* Read aggregation status. */ 520 ret = regmap_read(priv->regmap, pdata->cell, ®val); 521 if (ret) 522 goto out; 523 524 regval &= pdata->mask; 525 aggr_asserted = priv->aggr_cache ^ regval; 526 priv->aggr_cache = regval; 527 528 /* 529 * Handler is invoked, but no assertion is detected at top aggregation 530 * status level. Set aggr_asserted to mask value to allow handler extra 531 * run over all relevant signals to recover any missed signal. 532 */ 533 if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) { 534 priv->not_asserted = 0; 535 aggr_asserted = pdata->mask; 536 } 537 if (!aggr_asserted) 538 goto unmask_event; 539 540 /* Handle topology and health configuration changes. */ 541 for (i = 0; i < pdata->counter; i++, item++) { 542 if (aggr_asserted & item->aggr_mask) { 543 if (item->health) 544 mlxreg_hotplug_health_work_helper(priv, item); 545 else 546 mlxreg_hotplug_work_helper(priv, item); 547 } 548 } 549 550 spin_lock_irqsave(&priv->lock, flags); 551 552 /* 553 * It is possible, that some signals have been inserted, while 554 * interrupt has been masked by mlxreg_hotplug_work_handler. In this 555 * case such signals will be missed. In order to handle these signals 556 * delayed work is canceled and work task re-scheduled for immediate 557 * execution. It allows to handle missed signals, if any. In other case 558 * work handler just validates that no new signals have been received 559 * during masking. 560 */ 561 cancel_delayed_work(&priv->dwork_irq); 562 schedule_delayed_work(&priv->dwork_irq, 0); 563 564 spin_unlock_irqrestore(&priv->lock, flags); 565 566 return; 567 568 unmask_event: 569 priv->not_asserted++; 570 /* Unmask aggregation event (no need acknowledge). */ 571 ret = regmap_write(priv->regmap, pdata->cell + 572 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask); 573 574 out: 575 if (ret) 576 dev_err(priv->dev, "Failed to complete workqueue.\n"); 577 } 578 579 static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) 580 { 581 struct mlxreg_core_hotplug_platform_data *pdata; 582 struct mlxreg_core_item *item; 583 struct mlxreg_core_data *data; 584 u32 regval; 585 int i, j, ret; 586 587 pdata = dev_get_platdata(&priv->pdev->dev); 588 item = pdata->items; 589 590 for (i = 0; i < pdata->counter; i++, item++) { 591 /* Clear group presense event. */ 592 ret = regmap_write(priv->regmap, item->reg + 593 MLXREG_HOTPLUG_EVENT_OFF, 0); 594 if (ret) 595 goto out; 596 597 /* 598 * Verify if hardware configuration requires to disable 599 * interrupt capability for some of components. 600 */ 601 data = item->data; 602 for (j = 0; j < item->count; j++, data++) { 603 /* Verify if the attribute has capability register. */ 604 if (data->capability) { 605 /* Read capability register. */ 606 ret = regmap_read(priv->regmap, 607 data->capability, ®val); 608 if (ret) 609 goto out; 610 611 if (!(regval & data->bit)) 612 item->mask &= ~BIT(j); 613 } 614 } 615 616 /* Set group initial status as mask and unmask group event. */ 617 if (item->inversed) { 618 item->cache = item->mask; 619 ret = regmap_write(priv->regmap, item->reg + 620 MLXREG_HOTPLUG_MASK_OFF, 621 item->mask); 622 if (ret) 623 goto out; 624 } 625 } 626 627 /* Keep aggregation initial status as zero and unmask events. */ 628 ret = regmap_write(priv->regmap, pdata->cell + 629 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask); 630 if (ret) 631 goto out; 632 633 /* Keep low aggregation initial status as zero and unmask events. */ 634 if (pdata->cell_low) { 635 ret = regmap_write(priv->regmap, pdata->cell_low + 636 MLXREG_HOTPLUG_AGGR_MASK_OFF, 637 pdata->mask_low); 638 if (ret) 639 goto out; 640 } 641 642 /* Invoke work handler for initializing hot plug devices setting. */ 643 mlxreg_hotplug_work_handler(&priv->dwork_irq.work); 644 645 out: 646 if (ret) 647 dev_err(priv->dev, "Failed to set interrupts.\n"); 648 enable_irq(priv->irq); 649 return ret; 650 } 651 652 static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv) 653 { 654 struct mlxreg_core_hotplug_platform_data *pdata; 655 struct mlxreg_core_item *item; 656 struct mlxreg_core_data *data; 657 int count, i, j; 658 659 pdata = dev_get_platdata(&priv->pdev->dev); 660 item = pdata->items; 661 disable_irq(priv->irq); 662 cancel_delayed_work_sync(&priv->dwork_irq); 663 664 /* Mask low aggregation event, if defined. */ 665 if (pdata->cell_low) 666 regmap_write(priv->regmap, pdata->cell_low + 667 MLXREG_HOTPLUG_AGGR_MASK_OFF, 0); 668 669 /* Mask aggregation event. */ 670 regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF, 671 0); 672 673 /* Clear topology configurations. */ 674 for (i = 0; i < pdata->counter; i++, item++) { 675 data = item->data; 676 /* Mask group presense event. */ 677 regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF, 678 0); 679 /* Clear group presense event. */ 680 regmap_write(priv->regmap, data->reg + 681 MLXREG_HOTPLUG_EVENT_OFF, 0); 682 683 /* Remove all the attached devices in group. */ 684 count = item->count; 685 for (j = 0; j < count; j++, data++) 686 mlxreg_hotplug_device_destroy(priv, data, item->kind); 687 } 688 } 689 690 static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev) 691 { 692 struct mlxreg_hotplug_priv_data *priv; 693 694 priv = (struct mlxreg_hotplug_priv_data *)dev; 695 696 /* Schedule work task for immediate execution.*/ 697 schedule_delayed_work(&priv->dwork_irq, 0); 698 699 return IRQ_HANDLED; 700 } 701 702 static int mlxreg_hotplug_probe(struct platform_device *pdev) 703 { 704 struct mlxreg_core_hotplug_platform_data *pdata; 705 struct mlxreg_hotplug_priv_data *priv; 706 struct i2c_adapter *deferred_adap; 707 int err; 708 709 pdata = dev_get_platdata(&pdev->dev); 710 if (!pdata) { 711 dev_err(&pdev->dev, "Failed to get platform data.\n"); 712 return -EINVAL; 713 } 714 715 /* Defer probing if the necessary adapter is not configured yet. */ 716 deferred_adap = i2c_get_adapter(pdata->deferred_nr); 717 if (!deferred_adap) 718 return -EPROBE_DEFER; 719 i2c_put_adapter(deferred_adap); 720 721 priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); 722 if (!priv) 723 return -ENOMEM; 724 725 if (pdata->irq) { 726 priv->irq = pdata->irq; 727 } else { 728 priv->irq = platform_get_irq(pdev, 0); 729 if (priv->irq < 0) 730 return priv->irq; 731 } 732 733 priv->regmap = pdata->regmap; 734 priv->dev = pdev->dev.parent; 735 priv->pdev = pdev; 736 737 err = devm_request_irq(&pdev->dev, priv->irq, 738 mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING 739 | IRQF_SHARED, "mlxreg-hotplug", priv); 740 if (err) { 741 dev_err(&pdev->dev, "Failed to request irq: %d\n", err); 742 return err; 743 } 744 745 disable_irq(priv->irq); 746 spin_lock_init(&priv->lock); 747 INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler); 748 dev_set_drvdata(&pdev->dev, priv); 749 750 err = mlxreg_hotplug_attr_init(priv); 751 if (err) { 752 dev_err(&pdev->dev, "Failed to allocate attributes: %d\n", 753 err); 754 return err; 755 } 756 757 priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev, 758 "mlxreg_hotplug", priv, priv->groups); 759 if (IS_ERR(priv->hwmon)) { 760 dev_err(&pdev->dev, "Failed to register hwmon device %ld\n", 761 PTR_ERR(priv->hwmon)); 762 return PTR_ERR(priv->hwmon); 763 } 764 765 /* Perform initial interrupts setup. */ 766 mlxreg_hotplug_set_irq(priv); 767 priv->after_probe = true; 768 769 return 0; 770 } 771 772 static int mlxreg_hotplug_remove(struct platform_device *pdev) 773 { 774 struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev); 775 776 /* Clean interrupts setup. */ 777 mlxreg_hotplug_unset_irq(priv); 778 devm_free_irq(&pdev->dev, priv->irq, priv); 779 780 return 0; 781 } 782 783 static struct platform_driver mlxreg_hotplug_driver = { 784 .driver = { 785 .name = "mlxreg-hotplug", 786 }, 787 .probe = mlxreg_hotplug_probe, 788 .remove = mlxreg_hotplug_remove, 789 }; 790 791 module_platform_driver(mlxreg_hotplug_driver); 792 793 MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); 794 MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver"); 795 MODULE_LICENSE("Dual BSD/GPL"); 796 MODULE_ALIAS("platform:mlxreg-hotplug"); 797