1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Xilinx Event Management Driver 4 * 5 * Copyright (C) 2021 Xilinx, Inc. 6 * 7 * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com> 8 */ 9 10 #include <linux/cpuhotplug.h> 11 #include <linux/firmware/xlnx-event-manager.h> 12 #include <linux/firmware/xlnx-zynqmp.h> 13 #include <linux/hashtable.h> 14 #include <linux/interrupt.h> 15 #include <linux/irq.h> 16 #include <linux/irqdomain.h> 17 #include <linux/module.h> 18 #include <linux/of_irq.h> 19 #include <linux/platform_device.h> 20 #include <linux/slab.h> 21 22 static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1); 23 24 static int virq_sgi; 25 static int event_manager_availability = -EACCES; 26 27 /* SGI number used for Event management driver */ 28 #define XLNX_EVENT_SGI_NUM (15) 29 30 /* Max number of driver can register for same event */ 31 #define MAX_DRIVER_PER_EVENT (10U) 32 33 /* Max HashMap Order for PM API feature check (1<<7 = 128) */ 34 #define REGISTERED_DRIVER_MAX_ORDER (7) 35 36 #define MAX_BITS (32U) /* Number of bits available for error mask */ 37 38 #define FIRMWARE_VERSION_MASK (0xFFFFU) 39 #define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U) 40 41 static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER); 42 static int sgi_num = XLNX_EVENT_SGI_NUM; 43 44 static bool is_need_to_unregister; 45 46 /** 47 * struct agent_cb - Registered callback function and private data. 48 * @agent_data: Data passed back to handler function. 49 * @eve_cb: Function pointer to store the callback function. 50 * @list: member to create list. 51 */ 52 struct agent_cb { 53 void *agent_data; 54 event_cb_func_t eve_cb; 55 struct list_head list; 56 }; 57 58 /** 59 * struct registered_event_data - Registered Event Data. 60 * @key: key is the combine id(Node-Id | Event-Id) of type u64 61 * where upper u32 for Node-Id and lower u32 for Event-Id, 62 * And this used as key to index into hashmap. 63 * @cb_type: Type of Api callback, like PM_NOTIFY_CB, etc. 64 * @wake: If this flag set, firmware will wake up processor if is 65 * in sleep or power down state. 66 * @cb_list_head: Head of call back data list which contain the information 67 * about registered handler and private data. 68 * @hentry: hlist_node that hooks this entry into hashtable. 69 */ 70 struct registered_event_data { 71 u64 key; 72 enum pm_api_cb_id cb_type; 73 bool wake; 74 struct list_head cb_list_head; 75 struct hlist_node hentry; 76 }; 77 78 static bool xlnx_is_error_event(const u32 node_id) 79 { 80 if (node_id == EVENT_ERROR_PMC_ERR1 || 81 node_id == EVENT_ERROR_PMC_ERR2 || 82 node_id == EVENT_ERROR_PSM_ERR1 || 83 node_id == EVENT_ERROR_PSM_ERR2) 84 return true; 85 86 return false; 87 } 88 89 static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake, 90 event_cb_func_t cb_fun, void *data) 91 { 92 u64 key = 0; 93 bool present_in_hash = false; 94 struct registered_event_data *eve_data; 95 struct agent_cb *cb_data; 96 struct agent_cb *cb_pos; 97 struct agent_cb *cb_next; 98 99 key = ((u64)node_id << 32U) | (u64)event; 100 /* Check for existing entry in hash table for given key id */ 101 hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { 102 if (eve_data->key == key) { 103 present_in_hash = true; 104 break; 105 } 106 } 107 108 if (!present_in_hash) { 109 /* Add new entry if not present in HASH table */ 110 eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL); 111 if (!eve_data) 112 return -ENOMEM; 113 eve_data->key = key; 114 eve_data->cb_type = PM_NOTIFY_CB; 115 eve_data->wake = wake; 116 INIT_LIST_HEAD(&eve_data->cb_list_head); 117 118 cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); 119 if (!cb_data) { 120 kfree(eve_data); 121 return -ENOMEM; 122 } 123 cb_data->eve_cb = cb_fun; 124 cb_data->agent_data = data; 125 126 /* Add into callback list */ 127 list_add(&cb_data->list, &eve_data->cb_list_head); 128 129 /* Add into HASH table */ 130 hash_add(reg_driver_map, &eve_data->hentry, key); 131 } else { 132 /* Search for callback function and private data in list */ 133 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { 134 if (cb_pos->eve_cb == cb_fun && 135 cb_pos->agent_data == data) { 136 return 0; 137 } 138 } 139 140 /* Add multiple handler and private data in list */ 141 cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); 142 if (!cb_data) 143 return -ENOMEM; 144 cb_data->eve_cb = cb_fun; 145 cb_data->agent_data = data; 146 147 list_add(&cb_data->list, &eve_data->cb_list_head); 148 } 149 150 return 0; 151 } 152 153 static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data) 154 { 155 struct registered_event_data *eve_data; 156 struct agent_cb *cb_data; 157 158 /* Check for existing entry in hash table for given cb_type */ 159 hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) { 160 if (eve_data->cb_type == PM_INIT_SUSPEND_CB) { 161 pr_err("Found as already registered\n"); 162 return -EINVAL; 163 } 164 } 165 166 /* Add new entry if not present */ 167 eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL); 168 if (!eve_data) 169 return -ENOMEM; 170 171 eve_data->key = 0; 172 eve_data->cb_type = PM_INIT_SUSPEND_CB; 173 INIT_LIST_HEAD(&eve_data->cb_list_head); 174 175 cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL); 176 if (!cb_data) 177 return -ENOMEM; 178 cb_data->eve_cb = cb_fun; 179 cb_data->agent_data = data; 180 181 /* Add into callback list */ 182 list_add(&cb_data->list, &eve_data->cb_list_head); 183 184 hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB); 185 186 return 0; 187 } 188 189 static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun) 190 { 191 bool is_callback_found = false; 192 struct registered_event_data *eve_data; 193 struct agent_cb *cb_pos; 194 struct agent_cb *cb_next; 195 struct hlist_node *tmp; 196 197 is_need_to_unregister = false; 198 199 /* Check for existing entry in hash table for given cb_type */ 200 hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) { 201 if (eve_data->cb_type == PM_INIT_SUSPEND_CB) { 202 /* Delete the list of callback */ 203 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { 204 if (cb_pos->eve_cb == cb_fun) { 205 is_callback_found = true; 206 list_del_init(&cb_pos->list); 207 kfree(cb_pos); 208 } 209 } 210 /* remove an object from a hashtable */ 211 hash_del(&eve_data->hentry); 212 kfree(eve_data); 213 is_need_to_unregister = true; 214 } 215 } 216 if (!is_callback_found) { 217 pr_warn("Didn't find any registered callback for suspend event\n"); 218 return -EINVAL; 219 } 220 221 return 0; 222 } 223 224 static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event, 225 event_cb_func_t cb_fun, void *data) 226 { 227 bool is_callback_found = false; 228 struct registered_event_data *eve_data; 229 u64 key = ((u64)node_id << 32U) | (u64)event; 230 struct agent_cb *cb_pos; 231 struct agent_cb *cb_next; 232 struct hlist_node *tmp; 233 234 is_need_to_unregister = false; 235 236 /* Check for existing entry in hash table for given key id */ 237 hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) { 238 if (eve_data->key == key) { 239 /* Delete the list of callback */ 240 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { 241 if (cb_pos->eve_cb == cb_fun && 242 cb_pos->agent_data == data) { 243 is_callback_found = true; 244 list_del_init(&cb_pos->list); 245 kfree(cb_pos); 246 } 247 } 248 249 /* Remove HASH table if callback list is empty */ 250 if (list_empty(&eve_data->cb_list_head)) { 251 /* remove an object from a HASH table */ 252 hash_del(&eve_data->hentry); 253 kfree(eve_data); 254 is_need_to_unregister = true; 255 } 256 } 257 } 258 if (!is_callback_found) { 259 pr_warn("Didn't find any registered callback for 0x%x 0x%x\n", 260 node_id, event); 261 return -EINVAL; 262 } 263 264 return 0; 265 } 266 267 /** 268 * xlnx_register_event() - Register for the event. 269 * @cb_type: Type of callback from pm_api_cb_id, 270 * PM_NOTIFY_CB - for Error Events, 271 * PM_INIT_SUSPEND_CB - for suspend callback. 272 * @node_id: Node-Id related to event. 273 * @event: Event Mask for the Error Event. 274 * @wake: Flag specifying whether the subsystem should be woken upon 275 * event notification. 276 * @cb_fun: Function pointer to store the callback function. 277 * @data: Pointer for the driver instance. 278 * 279 * Return: Returns 0 on successful registration else error code. 280 */ 281 int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, 282 const bool wake, event_cb_func_t cb_fun, void *data) 283 { 284 int ret = 0; 285 u32 eve; 286 int pos; 287 288 if (event_manager_availability) 289 return event_manager_availability; 290 291 if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) { 292 pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type); 293 return -EINVAL; 294 } 295 296 if (!cb_fun) 297 return -EFAULT; 298 299 if (cb_type == PM_INIT_SUSPEND_CB) { 300 ret = xlnx_add_cb_for_suspend(cb_fun, data); 301 } else { 302 if (!xlnx_is_error_event(node_id)) { 303 /* Add entry for Node-Id/Event in hash table */ 304 ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data); 305 } else { 306 /* Add into Hash table */ 307 for (pos = 0; pos < MAX_BITS; pos++) { 308 eve = event & (1 << pos); 309 if (!eve) 310 continue; 311 312 /* Add entry for Node-Id/Eve in hash table */ 313 ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun, 314 data); 315 /* Break the loop if got error */ 316 if (ret) 317 break; 318 } 319 if (ret) { 320 /* Skip the Event for which got the error */ 321 pos--; 322 /* Remove registered(during this call) event from hash table */ 323 for ( ; pos >= 0; pos--) { 324 eve = event & (1 << pos); 325 if (!eve) 326 continue; 327 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); 328 } 329 } 330 } 331 332 if (ret) { 333 pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id, 334 event, ret); 335 return ret; 336 } 337 338 /* Register for Node-Id/Event combination in firmware */ 339 ret = zynqmp_pm_register_notifier(node_id, event, wake, true); 340 if (ret) { 341 pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id, 342 event, ret); 343 /* Remove already registered event from hash table */ 344 if (xlnx_is_error_event(node_id)) { 345 for (pos = 0; pos < MAX_BITS; pos++) { 346 eve = event & (1 << pos); 347 if (!eve) 348 continue; 349 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); 350 } 351 } else { 352 xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); 353 } 354 return ret; 355 } 356 } 357 358 return ret; 359 } 360 EXPORT_SYMBOL_GPL(xlnx_register_event); 361 362 /** 363 * xlnx_unregister_event() - Unregister for the event. 364 * @cb_type: Type of callback from pm_api_cb_id, 365 * PM_NOTIFY_CB - for Error Events, 366 * PM_INIT_SUSPEND_CB - for suspend callback. 367 * @node_id: Node-Id related to event. 368 * @event: Event Mask for the Error Event. 369 * @cb_fun: Function pointer of callback function. 370 * @data: Pointer of agent's private data. 371 * 372 * Return: Returns 0 on successful unregistration else error code. 373 */ 374 int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event, 375 event_cb_func_t cb_fun, void *data) 376 { 377 int ret = 0; 378 u32 eve, pos; 379 380 is_need_to_unregister = false; 381 382 if (event_manager_availability) 383 return event_manager_availability; 384 385 if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) { 386 pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type); 387 return -EINVAL; 388 } 389 390 if (!cb_fun) 391 return -EFAULT; 392 393 if (cb_type == PM_INIT_SUSPEND_CB) { 394 ret = xlnx_remove_cb_for_suspend(cb_fun); 395 } else { 396 /* Remove Node-Id/Event from hash table */ 397 if (!xlnx_is_error_event(node_id)) { 398 xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data); 399 } else { 400 for (pos = 0; pos < MAX_BITS; pos++) { 401 eve = event & (1 << pos); 402 if (!eve) 403 continue; 404 405 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data); 406 } 407 } 408 409 /* Un-register if list is empty */ 410 if (is_need_to_unregister) { 411 /* Un-register for Node-Id/Event combination */ 412 ret = zynqmp_pm_register_notifier(node_id, event, false, false); 413 if (ret) { 414 pr_err("%s() failed for 0x%x and 0x%x: %d\n", 415 __func__, node_id, event, ret); 416 return ret; 417 } 418 } 419 } 420 421 return ret; 422 } 423 EXPORT_SYMBOL_GPL(xlnx_unregister_event); 424 425 static void xlnx_call_suspend_cb_handler(const u32 *payload) 426 { 427 bool is_callback_found = false; 428 struct registered_event_data *eve_data; 429 u32 cb_type = payload[0]; 430 struct agent_cb *cb_pos; 431 struct agent_cb *cb_next; 432 433 /* Check for existing entry in hash table for given cb_type */ 434 hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) { 435 if (eve_data->cb_type == cb_type) { 436 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { 437 cb_pos->eve_cb(&payload[0], cb_pos->agent_data); 438 is_callback_found = true; 439 } 440 } 441 } 442 if (!is_callback_found) 443 pr_warn("Didn't find any registered callback for suspend event\n"); 444 } 445 446 static void xlnx_call_notify_cb_handler(const u32 *payload) 447 { 448 bool is_callback_found = false; 449 struct registered_event_data *eve_data; 450 u64 key = ((u64)payload[1] << 32U) | (u64)payload[2]; 451 int ret; 452 struct agent_cb *cb_pos; 453 struct agent_cb *cb_next; 454 455 /* Check for existing entry in hash table for given key id */ 456 hash_for_each_possible(reg_driver_map, eve_data, hentry, key) { 457 if (eve_data->key == key) { 458 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { 459 cb_pos->eve_cb(&payload[0], cb_pos->agent_data); 460 is_callback_found = true; 461 } 462 463 /* re register with firmware to get future events */ 464 ret = zynqmp_pm_register_notifier(payload[1], payload[2], 465 eve_data->wake, true); 466 if (ret) { 467 pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, 468 payload[1], payload[2], ret); 469 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, 470 list) { 471 /* Remove already registered event from hash table */ 472 xlnx_remove_cb_for_notify_event(payload[1], payload[2], 473 cb_pos->eve_cb, 474 cb_pos->agent_data); 475 } 476 } 477 } 478 } 479 if (!is_callback_found) 480 pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n", 481 payload[1], payload[2]); 482 } 483 484 static void xlnx_get_event_callback_data(u32 *buf) 485 { 486 zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); 487 } 488 489 static irqreturn_t xlnx_event_handler(int irq, void *dev_id) 490 { 491 u32 cb_type, node_id, event, pos; 492 u32 payload[CB_MAX_PAYLOAD_SIZE] = {0}; 493 u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0}; 494 495 /* Get event data */ 496 xlnx_get_event_callback_data(payload); 497 498 /* First element is callback type, others are callback arguments */ 499 cb_type = payload[0]; 500 501 if (cb_type == PM_NOTIFY_CB) { 502 node_id = payload[1]; 503 event = payload[2]; 504 if (!xlnx_is_error_event(node_id)) { 505 xlnx_call_notify_cb_handler(payload); 506 } else { 507 /* 508 * Each call back function expecting payload as an input arguments. 509 * We can get multiple error events as in one call back through error 510 * mask. So payload[2] may can contain multiple error events. 511 * In reg_driver_map database we store data in the combination of single 512 * node_id-error combination. 513 * So coping the payload message into event_data and update the 514 * event_data[2] with Error Mask for single error event and use 515 * event_data as input argument for registered call back function. 516 * 517 */ 518 memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE)); 519 /* Support Multiple Error Event */ 520 for (pos = 0; pos < MAX_BITS; pos++) { 521 if ((0 == (event & (1 << pos)))) 522 continue; 523 event_data[2] = (event & (1 << pos)); 524 xlnx_call_notify_cb_handler(event_data); 525 } 526 } 527 } else if (cb_type == PM_INIT_SUSPEND_CB) { 528 xlnx_call_suspend_cb_handler(payload); 529 } else { 530 pr_err("%s() Unsupported Callback %d\n", __func__, cb_type); 531 } 532 533 return IRQ_HANDLED; 534 } 535 536 static int xlnx_event_cpuhp_start(unsigned int cpu) 537 { 538 enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE); 539 540 return 0; 541 } 542 543 static int xlnx_event_cpuhp_down(unsigned int cpu) 544 { 545 disable_percpu_irq(virq_sgi); 546 547 return 0; 548 } 549 550 static void xlnx_disable_percpu_irq(void *data) 551 { 552 disable_percpu_irq(virq_sgi); 553 } 554 555 static int xlnx_event_init_sgi(struct platform_device *pdev) 556 { 557 int ret = 0; 558 int cpu; 559 /* 560 * IRQ related structures are used for the following: 561 * for each SGI interrupt ensure its mapped by GIC IRQ domain 562 * and that each corresponding linux IRQ for the HW IRQ has 563 * a handler for when receiving an interrupt from the remote 564 * processor. 565 */ 566 struct irq_domain *domain; 567 struct irq_fwspec sgi_fwspec; 568 struct device_node *interrupt_parent = NULL; 569 struct device *parent = pdev->dev.parent; 570 571 /* Find GIC controller to map SGIs. */ 572 interrupt_parent = of_irq_find_parent(parent->of_node); 573 if (!interrupt_parent) { 574 dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n"); 575 return -EINVAL; 576 } 577 578 /* Each SGI needs to be associated with GIC's IRQ domain. */ 579 domain = irq_find_host(interrupt_parent); 580 of_node_put(interrupt_parent); 581 582 /* Each mapping needs GIC domain when finding IRQ mapping. */ 583 sgi_fwspec.fwnode = domain->fwnode; 584 585 /* 586 * When irq domain looks at mapping each arg is as follows: 587 * 3 args for: interrupt type (SGI), interrupt # (set later), type 588 */ 589 sgi_fwspec.param_count = 1; 590 591 /* Set SGI's hwirq */ 592 sgi_fwspec.param[0] = sgi_num; 593 virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec); 594 595 cpu = get_cpu(); 596 per_cpu(cpu_number1, cpu) = cpu; 597 ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt", 598 &cpu_number1); 599 put_cpu(); 600 601 WARN_ON(ret); 602 if (ret) { 603 irq_dispose_mapping(virq_sgi); 604 return ret; 605 } 606 607 irq_to_desc(virq_sgi); 608 irq_set_status_flags(virq_sgi, IRQ_PER_CPU); 609 610 return ret; 611 } 612 613 static void xlnx_event_cleanup_sgi(struct platform_device *pdev) 614 { 615 int cpu = smp_processor_id(); 616 617 per_cpu(cpu_number1, cpu) = cpu; 618 619 cpuhp_remove_state(CPUHP_AP_ONLINE_DYN); 620 621 on_each_cpu(xlnx_disable_percpu_irq, NULL, 1); 622 623 irq_clear_status_flags(virq_sgi, IRQ_PER_CPU); 624 free_percpu_irq(virq_sgi, &cpu_number1); 625 irq_dispose_mapping(virq_sgi); 626 } 627 628 static int xlnx_event_manager_probe(struct platform_device *pdev) 629 { 630 int ret; 631 632 ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER); 633 if (ret < 0) { 634 dev_err(&pdev->dev, "Feature check failed with %d\n", ret); 635 return ret; 636 } 637 638 if ((ret & FIRMWARE_VERSION_MASK) < 639 REGISTER_NOTIFIER_FIRMWARE_VERSION) { 640 dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n", 641 REGISTER_NOTIFIER_FIRMWARE_VERSION, 642 ret & FIRMWARE_VERSION_MASK); 643 return -EOPNOTSUPP; 644 } 645 646 /* Initialize the SGI */ 647 ret = xlnx_event_init_sgi(pdev); 648 if (ret) { 649 dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret); 650 return ret; 651 } 652 653 /* Setup function for the CPU hot-plug cases */ 654 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting", 655 xlnx_event_cpuhp_start, xlnx_event_cpuhp_down); 656 657 ret = zynqmp_pm_register_sgi(sgi_num, 0); 658 if (ret) { 659 dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret); 660 xlnx_event_cleanup_sgi(pdev); 661 return ret; 662 } 663 664 event_manager_availability = 0; 665 666 dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num); 667 dev_info(&pdev->dev, "Xilinx Event Management driver probed\n"); 668 669 return ret; 670 } 671 672 static void xlnx_event_manager_remove(struct platform_device *pdev) 673 { 674 int i; 675 struct registered_event_data *eve_data; 676 struct hlist_node *tmp; 677 int ret; 678 struct agent_cb *cb_pos; 679 struct agent_cb *cb_next; 680 681 hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) { 682 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) { 683 list_del_init(&cb_pos->list); 684 kfree(cb_pos); 685 } 686 hash_del(&eve_data->hentry); 687 kfree(eve_data); 688 } 689 690 ret = zynqmp_pm_register_sgi(0, 1); 691 if (ret) 692 dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret); 693 694 xlnx_event_cleanup_sgi(pdev); 695 696 event_manager_availability = -EACCES; 697 } 698 699 static struct platform_driver xlnx_event_manager_driver = { 700 .probe = xlnx_event_manager_probe, 701 .remove_new = xlnx_event_manager_remove, 702 .driver = { 703 .name = "xlnx_event_manager", 704 }, 705 }; 706 module_param(sgi_num, uint, 0); 707 module_platform_driver(xlnx_event_manager_driver); 708