// SPDX-License-Identifier: GPL-2.0+
/*
 * Mellanox hotplug driver
 *
 * Copyright (C) 2016-2020 Mellanox Technologies
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/mlxreg.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/string_helpers.h>
#include <linux/regmap.h>
#include <linux/workqueue.h>

/* Offset of event and mask registers from status register. */
#define MLXREG_HOTPLUG_EVENT_OFF	1
#define MLXREG_HOTPLUG_MASK_OFF		2
#define MLXREG_HOTPLUG_AGGR_MASK_OFF	1

/* ASIC good health mask. */
#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK	0x02

#define MLXREG_HOTPLUG_ATTRS_MAX	128
#define MLXREG_HOTPLUG_NOT_ASSERT	3

/**
 * struct mlxreg_hotplug_priv_data - platform private data:
 * @irq: platform device interrupt number;
 * @dev: basic device;
 * @pdev: platform device;
 * @plat: platform data;
 * @regmap: register map handle;
 * @dwork_irq: delayed work template;
 * @lock: spin lock;
 * @hwmon: hwmon device;
 * @mlxreg_hotplug_attr: sysfs attributes array;
 * @mlxreg_hotplug_dev_attr: sysfs sensor device attribute array;
 * @group: sysfs attribute group;
 * @groups: list of sysfs attribute group for hwmon registration;
 * @cell: location of top aggregation interrupt register;
 * @mask: top aggregation interrupt common mask;
 * @aggr_cache: last value of aggregation register status;
 * @after_probe: flag indicating probing completion;
 * @not_asserted: number of entries in workqueue with no signal assertion;
 */
struct mlxreg_hotplug_priv_data {
	int irq;
	struct device *dev;
	struct platform_device *pdev;
	struct mlxreg_hotplug_platform_data *plat;
	struct regmap *regmap;
	struct delayed_work dwork_irq;
	spinlock_t lock; /* sync with interrupt */
	struct device *hwmon;
	struct attribute *mlxreg_hotplug_attr[MLXREG_HOTPLUG_ATTRS_MAX + 1];
	struct sensor_device_attribute_2
			mlxreg_hotplug_dev_attr[MLXREG_HOTPLUG_ATTRS_MAX];
	struct attribute_group group;
	const struct attribute_group *groups[2];
	u32 cell;
	u32 mask;
	u32 aggr_cache;
	bool after_probe;
	u8 not_asserted;
};

/* Environment variables array for udev. */
static char *mlxreg_hotplug_udev_envp[] = { NULL, NULL };

static int
mlxreg_hotplug_udev_event_send(struct kobject *kobj,
			       struct mlxreg_core_data *data, bool action)
{
	char event_str[MLXREG_CORE_LABEL_MAX_SIZE + 2];
	char label[MLXREG_CORE_LABEL_MAX_SIZE] = { 0 };

	mlxreg_hotplug_udev_envp[0] = event_str;
	string_upper(label, data->label);
	snprintf(event_str, MLXREG_CORE_LABEL_MAX_SIZE, "%s=%d", label, !!action);

	return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
}

static void
mlxreg_hotplug_pdata_export(void *pdata, void *regmap)
{
	struct mlxreg_core_hotplug_platform_data *dev_pdata = pdata;

	/* Export regmap to underlying device. */
	dev_pdata->regmap = regmap;
}
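/*
 * For illustration (hypothetical label): for an entry whose label is "psu1",
 * mlxreg_hotplug_udev_event_send() above reports insertion as the uevent
 * environment string "PSU1=1" and removal as "PSU1=0", delivered as a
 * KOBJ_CHANGE event on the hwmon device, so udev rules can match on it.
 */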
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
					struct mlxreg_core_data *data,
					enum mlxreg_hotplug_kind kind)
{
	struct i2c_board_info *brdinfo = data->hpdev.brdinfo;
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct i2c_client *client;

	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, true);

	/*
	 * Return if adapter number is negative. It can happen when a hotplug
	 * event is not associated with a hotplug device.
	 */
	if (data->hpdev.nr < 0 && data->hpdev.action != MLXREG_HOTPLUG_DEVICE_NO_ACTION)
		return 0;

	pdata = dev_get_platdata(&priv->pdev->dev);
	switch (data->hpdev.action) {
	case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
		data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
						      pdata->shift_nr);
		if (!data->hpdev.adapter) {
			dev_err(priv->dev, "Failed to get adapter for bus %d\n",
				data->hpdev.nr + pdata->shift_nr);
			return -EFAULT;
		}

		/* Export platform data to underlying device. */
		if (brdinfo->platform_data)
			mlxreg_hotplug_pdata_export(brdinfo->platform_data, pdata->regmap);

		client = i2c_new_client_device(data->hpdev.adapter,
					       brdinfo);
		if (IS_ERR(client)) {
			dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
				brdinfo->type, data->hpdev.nr +
				pdata->shift_nr, brdinfo->addr);

			i2c_put_adapter(data->hpdev.adapter);
			data->hpdev.adapter = NULL;
			return PTR_ERR(client);
		}

		data->hpdev.client = client;
		break;
	case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
		/* Export platform data to underlying device. */
		if (data->hpdev.brdinfo && data->hpdev.brdinfo->platform_data)
			mlxreg_hotplug_pdata_export(data->hpdev.brdinfo->platform_data,
						    pdata->regmap);
		/* Pass parent hotplug device handle to underlying device. */
		data->notifier = data->hpdev.notifier;
		data->hpdev.pdev = platform_device_register_resndata(&priv->pdev->dev,
								     brdinfo->type,
								     data->hpdev.nr,
								     NULL, 0, data,
								     sizeof(*data));
		if (IS_ERR(data->hpdev.pdev))
			return PTR_ERR(data->hpdev.pdev);

		break;
	default:
		break;
	}

	if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
		return data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 1);

	return 0;
}
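/*
 * A minimal sketch (hypothetical device type and address) of the kind of
 * i2c_board_info a hotplug entry may carry for the default action above,
 * e.g. a PSU EEPROM instantiated once the unit is detected:
 *
 *	static struct i2c_board_info psu_eeprom_brdinfo = {
 *		I2C_BOARD_INFO("24c32", 0x51),
 *	};
 *
 * The real board info is supplied by the platform data, not by this driver.
 */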
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
			      struct mlxreg_core_data *data,
			      enum mlxreg_hotplug_kind kind)
{
	/* Notify user by sending hwmon uevent. */
	mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);
	if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
		data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 0);

	switch (data->hpdev.action) {
	case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
		if (data->hpdev.client) {
			i2c_unregister_device(data->hpdev.client);
			data->hpdev.client = NULL;
		}

		if (data->hpdev.adapter) {
			i2c_put_adapter(data->hpdev.adapter);
			data->hpdev.adapter = NULL;
		}
		break;
	case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
		if (data->hpdev.pdev)
			platform_device_unregister(data->hpdev.pdev);
		break;
	default:
		break;
	}
}

static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(dev);
	struct mlxreg_core_hotplug_platform_data *pdata;
	int index = to_sensor_dev_attr_2(attr)->index;
	int nr = to_sensor_dev_attr_2(attr)->nr;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items + nr;
	data = item->data + index;

	ret = regmap_read(priv->regmap, data->reg, &regval);
	if (ret)
		return ret;

	if (item->health) {
		regval &= data->mask;
	} else {
		/* Bit = 0 : functional if item->inversed is true. */
		if (item->inversed)
			regval = !(regval & data->mask);
		else
			regval = !!(regval & data->mask);
	}

	return sprintf(buf, "%u\n", regval);
}

#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]

static int mlxreg_hotplug_item_label_index_get(u32 mask, u32 bit)
{
	int i, j;

	for (i = 0, j = -1; i <= bit; i++) {
		if (mask & BIT(i))
			j++;
	}
	return j;
}
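/*
 * Worked example for the helper above: with mask = 0x0a (bits 1 and 3 set)
 * and bit = 3, the loop counts the set bits at or below bit 3 and returns 1,
 * i.e. the entry is the second labelled unit within the item's data array.
 */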
291 */ 292 ret = regmap_read(priv->regmap, 293 data->capability, ®val); 294 if (ret) 295 return ret; 296 297 if (!(regval & data->bit)) { 298 data++; 299 continue; 300 } 301 } 302 303 PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr; 304 PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev, 305 GFP_KERNEL, 306 data->label); 307 if (!PRIV_ATTR(id)->name) { 308 dev_err(priv->dev, "Memory allocation failed for attr %d.\n", 309 id); 310 return -ENOMEM; 311 } 312 313 PRIV_DEV_ATTR(id).dev_attr.attr.name = 314 PRIV_ATTR(id)->name; 315 PRIV_DEV_ATTR(id).dev_attr.attr.mode = 0444; 316 PRIV_DEV_ATTR(id).dev_attr.show = 317 mlxreg_hotplug_attr_show; 318 PRIV_DEV_ATTR(id).nr = i; 319 PRIV_DEV_ATTR(id).index = k; 320 sysfs_attr_init(&PRIV_DEV_ATTR(id).dev_attr.attr); 321 data++; 322 id++; 323 k++; 324 } 325 num_attrs += k; 326 } 327 328 priv->group.attrs = devm_kcalloc(&priv->pdev->dev, 329 num_attrs, 330 sizeof(struct attribute *), 331 GFP_KERNEL); 332 if (!priv->group.attrs) 333 return -ENOMEM; 334 335 priv->group.attrs = priv->mlxreg_hotplug_attr; 336 priv->groups[0] = &priv->group; 337 priv->groups[1] = NULL; 338 339 return 0; 340 } 341 342 static void 343 mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv, 344 struct mlxreg_core_item *item) 345 { 346 struct mlxreg_core_data *data; 347 unsigned long asserted; 348 u32 regval, bit; 349 int ret; 350 351 /* 352 * Validate if item related to received signal type is valid. 353 * It should never happen, excepted the situation when some 354 * piece of hardware is broken. In such situation just produce 355 * error message and return. Caller must continue to handle the 356 * signals from other devices if any. 357 */ 358 if (unlikely(!item)) { 359 dev_err(priv->dev, "False signal: at offset:mask 0x%02x:0x%02x.\n", 360 item->reg, item->mask); 361 362 return; 363 } 364 365 /* Mask event. */ 366 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF, 367 0); 368 if (ret) 369 goto out; 370 371 /* Read status. */ 372 ret = regmap_read(priv->regmap, item->reg, ®val); 373 if (ret) 374 goto out; 375 376 /* Set asserted bits and save last status. */ 377 regval &= item->mask; 378 asserted = item->cache ^ regval; 379 item->cache = regval; 380 for_each_set_bit(bit, &asserted, 8) { 381 int pos; 382 383 pos = mlxreg_hotplug_item_label_index_get(item->mask, bit); 384 if (pos < 0) 385 goto out; 386 387 data = item->data + pos; 388 if (regval & BIT(bit)) { 389 if (item->inversed) 390 mlxreg_hotplug_device_destroy(priv, data, item->kind); 391 else 392 mlxreg_hotplug_device_create(priv, data, item->kind); 393 } else { 394 if (item->inversed) 395 mlxreg_hotplug_device_create(priv, data, item->kind); 396 else 397 mlxreg_hotplug_device_destroy(priv, data, item->kind); 398 } 399 } 400 401 /* Acknowledge event. */ 402 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF, 403 0); 404 if (ret) 405 goto out; 406 407 /* Unmask event. */ 408 ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_MASK_OFF, 409 item->mask); 410 411 out: 412 if (ret) 413 dev_err(priv->dev, "Failed to complete workqueue.\n"); 414 } 415 416 static void 417 mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv, 418 struct mlxreg_core_item *item) 419 { 420 struct mlxreg_core_data *data = item->data; 421 u32 regval; 422 int i, ret = 0; 423 424 for (i = 0; i < item->count; i++, data++) { 425 /* Mask event. */ 426 ret = regmap_write(priv->regmap, data->reg + 427 MLXREG_HOTPLUG_MASK_OFF, 0); 428 if (ret) 429 goto out; 430 431 /* Read status. 
static void
mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
				  struct mlxreg_core_item *item)
{
	struct mlxreg_core_data *data = item->data;
	u32 regval;
	int i, ret = 0;

	for (i = 0; i < item->count; i++, data++) {
		/* Mask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, 0);
		if (ret)
			goto out;

		/* Read status. */
		ret = regmap_read(priv->regmap, data->reg, &regval);
		if (ret)
			goto out;

		regval &= data->mask;

		if (item->cache == regval)
			goto ack_event;

		/*
		 * ASIC health indication is provided through two bits. Bits
		 * value 0x2 indicates that the ASIC reached the good health
		 * state, value 0x0 indicates ASIC bad health or the dormant
		 * state and value 0x3 indicates the booting state. During
		 * ASIC reset it should pass the following states:
		 * dormant -> booting -> good.
		 */
		if (regval == MLXREG_HOTPLUG_GOOD_HEALTH_MASK) {
			if (!data->attached) {
				/*
				 * ASIC is in steady state. Connect associated
				 * device, if configured.
				 */
				mlxreg_hotplug_device_create(priv, data, item->kind);
				data->attached = true;
			}
		} else {
			if (data->attached) {
				/*
				 * ASIC health is failed after ASIC has been
				 * in steady state. Disconnect associated
				 * device, if it has been connected.
				 */
				mlxreg_hotplug_device_destroy(priv, data, item->kind);
				data->attached = false;
				data->health_cntr = 0;
			}
		}
		item->cache = regval;
ack_event:
		/* Acknowledge event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/* Unmask event. */
		ret = regmap_write(priv->regmap, data->reg +
				   MLXREG_HOTPLUG_MASK_OFF, data->mask);
		if (ret)
			goto out;
	}

out:
	if (ret)
		dev_err(priv->dev, "Failed to complete workqueue.\n");
}
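/*
 * Illustrative health sequence for the helper above: across successive
 * invocations, a masked status of 0x0 (dormant), then 0x3 (booting), then
 * 0x2 (good) leaves the device untouched until the final reading, at which
 * point the associated device is created and data->attached is set. A later
 * drop away from 0x2 tears the device down again and clears the counter.
 */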
549 */ 550 if (priv->not_asserted == MLXREG_HOTPLUG_NOT_ASSERT) { 551 priv->not_asserted = 0; 552 aggr_asserted = pdata->mask; 553 } 554 if (!aggr_asserted) 555 goto unmask_event; 556 557 /* Handle topology and health configuration changes. */ 558 for (i = 0; i < pdata->counter; i++, item++) { 559 if (aggr_asserted & item->aggr_mask) { 560 if (item->health) 561 mlxreg_hotplug_health_work_helper(priv, item); 562 else 563 mlxreg_hotplug_work_helper(priv, item); 564 } 565 } 566 567 spin_lock_irqsave(&priv->lock, flags); 568 569 /* 570 * It is possible, that some signals have been inserted, while 571 * interrupt has been masked by mlxreg_hotplug_work_handler. In this 572 * case such signals will be missed. In order to handle these signals 573 * delayed work is canceled and work task re-scheduled for immediate 574 * execution. It allows to handle missed signals, if any. In other case 575 * work handler just validates that no new signals have been received 576 * during masking. 577 */ 578 cancel_delayed_work(&priv->dwork_irq); 579 schedule_delayed_work(&priv->dwork_irq, 0); 580 581 spin_unlock_irqrestore(&priv->lock, flags); 582 583 return; 584 585 unmask_event: 586 priv->not_asserted++; 587 /* Unmask aggregation event (no need acknowledge). */ 588 ret = regmap_write(priv->regmap, pdata->cell + 589 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask); 590 591 out: 592 if (ret) 593 dev_err(priv->dev, "Failed to complete workqueue.\n"); 594 } 595 596 static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) 597 { 598 struct mlxreg_core_hotplug_platform_data *pdata; 599 struct mlxreg_core_item *item; 600 struct mlxreg_core_data *data; 601 u32 regval; 602 int i, j, ret; 603 604 pdata = dev_get_platdata(&priv->pdev->dev); 605 item = pdata->items; 606 607 for (i = 0; i < pdata->counter; i++, item++) { 608 /* Clear group presense event. */ 609 ret = regmap_write(priv->regmap, item->reg + 610 MLXREG_HOTPLUG_EVENT_OFF, 0); 611 if (ret) 612 goto out; 613 614 /* 615 * Verify if hardware configuration requires to disable 616 * interrupt capability for some of components. 617 */ 618 data = item->data; 619 for (j = 0; j < item->count; j++, data++) { 620 /* Verify if the attribute has capability register. */ 621 if (data->capability) { 622 /* Read capability register. */ 623 ret = regmap_read(priv->regmap, 624 data->capability, ®val); 625 if (ret) 626 goto out; 627 628 if (!(regval & data->bit)) 629 item->mask &= ~BIT(j); 630 } 631 } 632 633 /* Set group initial status as mask and unmask group event. */ 634 if (item->inversed) { 635 item->cache = item->mask; 636 ret = regmap_write(priv->regmap, item->reg + 637 MLXREG_HOTPLUG_MASK_OFF, 638 item->mask); 639 if (ret) 640 goto out; 641 } 642 } 643 644 /* Keep aggregation initial status as zero and unmask events. */ 645 ret = regmap_write(priv->regmap, pdata->cell + 646 MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask); 647 if (ret) 648 goto out; 649 650 /* Keep low aggregation initial status as zero and unmask events. */ 651 if (pdata->cell_low) { 652 ret = regmap_write(priv->regmap, pdata->cell_low + 653 MLXREG_HOTPLUG_AGGR_MASK_OFF, 654 pdata->mask_low); 655 if (ret) 656 goto out; 657 } 658 659 /* Invoke work handler for initializing hot plug devices setting. 
static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	u32 regval;
	int i, j, ret;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;

	for (i = 0; i < pdata->counter; i++, item++) {
		/* Clear group presence event. */
		ret = regmap_write(priv->regmap, item->reg +
				   MLXREG_HOTPLUG_EVENT_OFF, 0);
		if (ret)
			goto out;

		/*
		 * Verify if hardware configuration requires disabling
		 * interrupt capability for some of the components.
		 */
		data = item->data;
		for (j = 0; j < item->count; j++, data++) {
			/* Verify if the attribute has capability register. */
			if (data->capability) {
				/* Read capability register. */
				ret = regmap_read(priv->regmap,
						  data->capability, &regval);
				if (ret)
					goto out;

				if (!(regval & data->bit))
					item->mask &= ~BIT(j);
			}
		}

		/* Set group initial status as mask and unmask group event. */
		if (item->inversed) {
			item->cache = item->mask;
			ret = regmap_write(priv->regmap, item->reg +
					   MLXREG_HOTPLUG_MASK_OFF,
					   item->mask);
			if (ret)
				goto out;
		}
	}

	/* Keep aggregation initial status as zero and unmask events. */
	ret = regmap_write(priv->regmap, pdata->cell +
			   MLXREG_HOTPLUG_AGGR_MASK_OFF, pdata->mask);
	if (ret)
		goto out;

	/* Keep low aggregation initial status as zero and unmask events. */
	if (pdata->cell_low) {
		ret = regmap_write(priv->regmap, pdata->cell_low +
				   MLXREG_HOTPLUG_AGGR_MASK_OFF,
				   pdata->mask_low);
		if (ret)
			goto out;
	}

	/* Invoke work handler for initial hotplug devices setup. */
	mlxreg_hotplug_work_handler(&priv->dwork_irq.work);

out:
	if (ret)
		dev_err(priv->dev, "Failed to set interrupts.\n");
	enable_irq(priv->irq);
	return ret;
}

static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_core_item *item;
	struct mlxreg_core_data *data;
	int count, i, j;

	pdata = dev_get_platdata(&priv->pdev->dev);
	item = pdata->items;
	disable_irq(priv->irq);
	cancel_delayed_work_sync(&priv->dwork_irq);

	/* Mask low aggregation event, if defined. */
	if (pdata->cell_low)
		regmap_write(priv->regmap, pdata->cell_low +
			     MLXREG_HOTPLUG_AGGR_MASK_OFF, 0);

	/* Mask aggregation event. */
	regmap_write(priv->regmap, pdata->cell + MLXREG_HOTPLUG_AGGR_MASK_OFF,
		     0);

	/* Clear topology configurations. */
	for (i = 0; i < pdata->counter; i++, item++) {
		data = item->data;
		/* Mask group presence event. */
		regmap_write(priv->regmap, data->reg + MLXREG_HOTPLUG_MASK_OFF,
			     0);
		/* Clear group presence event. */
		regmap_write(priv->regmap, data->reg +
			     MLXREG_HOTPLUG_EVENT_OFF, 0);

		/* Remove all the attached devices in group. */
		count = item->count;
		for (j = 0; j < count; j++, data++)
			mlxreg_hotplug_device_destroy(priv, data, item->kind);
	}
}

static irqreturn_t mlxreg_hotplug_irq_handler(int irq, void *dev)
{
	struct mlxreg_hotplug_priv_data *priv;

	priv = (struct mlxreg_hotplug_priv_data *)dev;

	/* Schedule work task for immediate execution. */
	schedule_delayed_work(&priv->dwork_irq, 0);

	return IRQ_HANDLED;
}
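/*
 * A minimal sketch of the platform data a board description might hand to
 * this driver for a single two-unit PSU group (all register offsets, masks
 * and bus numbers below are hypothetical, for illustration only):
 *
 *	static struct mlxreg_core_data example_psu_data[] = {
 *		{ .label = "psu1", .reg = 0x58, .mask = BIT(0) },
 *		{ .label = "psu2", .reg = 0x58, .mask = BIT(1) },
 *	};
 *
 *	static struct mlxreg_core_item example_items[] = {
 *		{
 *			.data = example_psu_data,
 *			.count = ARRAY_SIZE(example_psu_data),
 *			.reg = 0x58,
 *			.mask = GENMASK(1, 0),
 *			.aggr_mask = BIT(0),
 *			.inversed = true,
 *		},
 *	};
 *
 *	static struct mlxreg_core_hotplug_platform_data example_pdata = {
 *		.items = example_items,
 *		.counter = ARRAY_SIZE(example_items),
 *		.cell = 0x3a,
 *		.mask = BIT(0),
 *		.deferred_nr = 1,
 *	};
 *
 * Such data would be attached to a "mlxreg-hotplug" platform device so that
 * mlxreg_hotplug_probe() below can pick it up via dev_get_platdata().
 */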
static int mlxreg_hotplug_probe(struct platform_device *pdev)
{
	struct mlxreg_core_hotplug_platform_data *pdata;
	struct mlxreg_hotplug_priv_data *priv;
	struct i2c_adapter *deferred_adap;
	int err;

	pdata = dev_get_platdata(&pdev->dev);
	if (!pdata) {
		dev_err(&pdev->dev, "Failed to get platform data.\n");
		return -EINVAL;
	}

	/* Defer probing if the necessary adapter is not configured yet. */
	deferred_adap = i2c_get_adapter(pdata->deferred_nr);
	if (!deferred_adap)
		return -EPROBE_DEFER;
	i2c_put_adapter(deferred_adap);

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (pdata->irq) {
		priv->irq = pdata->irq;
	} else {
		priv->irq = platform_get_irq(pdev, 0);
		if (priv->irq < 0)
			return priv->irq;
	}

	priv->regmap = pdata->regmap;
	priv->dev = pdev->dev.parent;
	priv->pdev = pdev;

	err = devm_request_irq(&pdev->dev, priv->irq,
			       mlxreg_hotplug_irq_handler, IRQF_TRIGGER_FALLING
			       | IRQF_SHARED, "mlxreg-hotplug", priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to request irq: %d\n", err);
		return err;
	}

	disable_irq(priv->irq);
	spin_lock_init(&priv->lock);
	INIT_DELAYED_WORK(&priv->dwork_irq, mlxreg_hotplug_work_handler);
	dev_set_drvdata(&pdev->dev, priv);

	err = mlxreg_hotplug_attr_init(priv);
	if (err) {
		dev_err(&pdev->dev, "Failed to allocate attributes: %d\n",
			err);
		return err;
	}

	priv->hwmon = devm_hwmon_device_register_with_groups(&pdev->dev,
					"mlxreg_hotplug", priv, priv->groups);
	if (IS_ERR(priv->hwmon)) {
		dev_err(&pdev->dev, "Failed to register hwmon device %ld\n",
			PTR_ERR(priv->hwmon));
		return PTR_ERR(priv->hwmon);
	}

	/* Perform initial interrupts setup. */
	mlxreg_hotplug_set_irq(priv);
	priv->after_probe = true;

	return 0;
}

static int mlxreg_hotplug_remove(struct platform_device *pdev)
{
	struct mlxreg_hotplug_priv_data *priv = dev_get_drvdata(&pdev->dev);

	/* Clean interrupts setup. */
	mlxreg_hotplug_unset_irq(priv);
	devm_free_irq(&pdev->dev, priv->irq, priv);

	return 0;
}

static struct platform_driver mlxreg_hotplug_driver = {
	.driver = {
		.name = "mlxreg-hotplug",
	},
	.probe = mlxreg_hotplug_probe,
	.remove = mlxreg_hotplug_remove,
};

module_platform_driver(mlxreg_hotplug_driver);

MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>");
MODULE_DESCRIPTION("Mellanox regmap hotplug platform driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform:mlxreg-hotplug");