1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Xilinx Event Management Driver
4 *
5 * Copyright (C) 2021 Xilinx, Inc.
6 * Copyright (C) 2024 Advanced Micro Devices, Inc.
7 *
8 * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
9 */
10
11 #include <linux/cpuhotplug.h>
12 #include <linux/firmware/xlnx-event-manager.h>
13 #include <linux/firmware/xlnx-zynqmp.h>
14 #include <linux/hashtable.h>
15 #include <linux/interrupt.h>
16 #include <linux/irq.h>
17 #include <linux/irqdomain.h>
18 #include <linux/module.h>
19 #include <linux/of_irq.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22
/* Per-CPU cookie handed to {request,free}_percpu_irq(); its value is unused. */
static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);

/* Linux virq backing the SGI used for firmware event notifications. */
static int virq_sgi;
/* 0 once probe succeeds; stays -EACCES while the event manager is unavailable. */
static int event_manager_availability = -EACCES;

/* SGI number used for Event management driver */
#define XLNX_EVENT_SGI_NUM	(15)

/* Max number of driver can register for same event */
#define MAX_DRIVER_PER_EVENT	(10U)

/* Max HashMap Order for PM API feature check (1<<7 = 128) */
#define REGISTERED_DRIVER_MAX_ORDER	(7)

#define MAX_BITS	(32U) /* Number of bits available for error mask */

#define FIRMWARE_VERSION_MASK			(0xFFFFU)
#define REGISTER_NOTIFIER_FIRMWARE_VERSION	(2U)

/* Maps key (node_id << 32 | event) -> struct registered_event_data */
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
/* SGI number; overridable through the "sgi_num" module parameter. */
static int sgi_num = XLNX_EVENT_SGI_NUM;

/*
 * Set by the remove helpers when a hash entry was freed, telling
 * xlnx_unregister_event() to also unregister from firmware.
 */
static bool is_need_to_unregister;
46
/**
 * struct agent_cb - Registered callback function and private data.
 * @agent_data:	Data passed back to handler function.
 * @eve_cb:	Function pointer to store the callback function.
 * @list:	Member to link this entry into an event's callback list.
 */
struct agent_cb {
	void *agent_data;
	event_cb_func_t eve_cb;
	struct list_head list;
};

/**
 * struct registered_event_data - Registered Event Data.
 * @key:	Combined id (Node-Id | Event-Id) of type u64, where the
 *		upper u32 is the Node-Id and the lower u32 the Event-Id;
 *		used as the key to index into the hashmap.
 * @cb_type:	Type of API callback, like PM_NOTIFY_CB, etc.
 * @wake:	If this flag is set, firmware will wake up the processor
 *		if it is in sleep or power down state.
 * @cb_list_head: Head of the callback data list which contains the
 *		information about registered handlers and private data.
 * @hentry:	hlist_node that hooks this entry into the hashtable.
 */
struct registered_event_data {
	u64 key;
	enum pm_api_cb_id cb_type;
	bool wake;
	struct list_head cb_list_head;
	struct hlist_node hentry;
};
78
xlnx_is_error_event(const u32 node_id)79 static bool xlnx_is_error_event(const u32 node_id)
80 {
81 if (node_id == EVENT_ERROR_PMC_ERR1 ||
82 node_id == EVENT_ERROR_PMC_ERR2 ||
83 node_id == EVENT_ERROR_PSM_ERR1 ||
84 node_id == EVENT_ERROR_PSM_ERR2)
85 return true;
86
87 return false;
88 }
89
/**
 * xlnx_add_cb_for_notify_event() - Register a callback for a Node-Id/Event.
 * @node_id:	Node-Id related to the event.
 * @event:	Event mask (a single bit for error events).
 * @wake:	Wake flag stored in the hash entry; later forwarded to firmware
 *		when re-registering from the notify handler.
 * @cb_fun:	Callback to invoke when the event fires.
 * @data:	Agent private data passed back to @cb_fun.
 *
 * Creates the hash entry for key (node_id << 32 | event) if missing and
 * appends the callback to its list; an already-registered cb_fun/data
 * pair is silently accepted (returns 0 without adding a duplicate).
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
					event_cb_func_t cb_fun, void *data)
{
	u64 key = 0;
	bool present_in_hash = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;
	struct agent_cb *cb_pos;

	key = ((u64)node_id << 32U) | (u64)event;
	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			present_in_hash = true;
			break;
		}
	}

	if (!present_in_hash) {
		/* Add new entry if not present in HASH table */
		eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
		if (!eve_data)
			return -ENOMEM;
		eve_data->key = key;
		eve_data->cb_type = PM_NOTIFY_CB;
		eve_data->wake = wake;
		INIT_LIST_HEAD(&eve_data->cb_list_head);

		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data) {
			kfree(eve_data);
			return -ENOMEM;
		}
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		/* Add into callback list */
		list_add(&cb_data->list, &eve_data->cb_list_head);

		/* Add into HASH table */
		hash_add(reg_driver_map, &eve_data->hentry, key);
	} else {
		/*
		 * Plain iteration suffices: nothing is removed while searching
		 * for an already-registered cb_fun/data pair.
		 */
		list_for_each_entry(cb_pos, &eve_data->cb_list_head, list) {
			if (cb_pos->eve_cb == cb_fun &&
			    cb_pos->agent_data == data) {
				return 0;
			}
		}

		/* Add multiple handler and private data in list */
		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data)
			return -ENOMEM;
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		list_add(&cb_data->list, &eve_data->cb_list_head);
	}

	return 0;
}
153
/**
 * xlnx_add_cb_for_suspend() - Register a callback for the suspend event.
 * @cb_fun:	Callback to invoke on PM_INIT_SUSPEND_CB.
 * @data:	Agent private data passed back to @cb_fun.
 *
 * Only a single suspend registration is allowed at a time.
 *
 * Return: 0 on success, -EINVAL if a suspend callback is already
 * registered, -ENOMEM on allocation failure.
 */
static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			pr_err("Found as already registered\n");
			return -EINVAL;
		}
	}

	/* Add new entry if not present */
	eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
	if (!eve_data)
		return -ENOMEM;

	eve_data->key = 0;
	eve_data->cb_type = PM_INIT_SUSPEND_CB;
	INIT_LIST_HEAD(&eve_data->cb_list_head);

	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
	if (!cb_data) {
		/* Don't leak the just-allocated hash entry */
		kfree(eve_data);
		return -ENOMEM;
	}
	cb_data->eve_cb = cb_fun;
	cb_data->agent_data = data;

	/* Add into callback list */
	list_add(&cb_data->list, &eve_data->cb_list_head);

	hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);

	return 0;
}
189
/**
 * xlnx_remove_cb_for_suspend() - Remove the registered suspend callback.
 * @cb_fun:	Callback function to remove.
 *
 * Frees the matching agent_cb entries and the PM_INIT_SUSPEND_CB hash
 * entry, and sets is_need_to_unregister once an entry was freed.
 *
 * Return: 0 when a matching callback was found and removed, -EINVAL
 * otherwise.
 */
static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}
			/*
			 * remove an object from a hashtable
			 * NOTE(review): the hash entry is freed even when
			 * @cb_fun matched no list entry — confirm this is the
			 * intended behavior for a mismatched unregister.
			 */
			hash_del(&eve_data->hentry);
			kfree(eve_data);
			is_need_to_unregister = true;
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for suspend event\n");
		return -EINVAL;
	}

	return 0;
}
224
/**
 * xlnx_remove_cb_for_notify_event() - Remove a callback for a Node-Id/Event.
 * @node_id:	Node-Id of the registered event.
 * @event:	Event mask (a single bit for error events).
 * @cb_fun:	Callback function to remove.
 * @data:	Agent private data the callback was registered with.
 *
 * Frees the matching agent_cb entries; when the callback list becomes
 * empty the hash entry is freed too and is_need_to_unregister is set so
 * the caller also unregisters the event from firmware.
 *
 * Return: 0 when a matching callback was removed, -EINVAL otherwise.
 */
static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
					   event_cb_func_t cb_fun, void *data)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)node_id << 32U) | (u64)event;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
		if (eve_data->key == key) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun &&
				    cb_pos->agent_data == data) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}

			/* Remove HASH table if callback list is empty */
			if (list_empty(&eve_data->cb_list_head)) {
				/* remove an object from a HASH table */
				hash_del(&eve_data->hentry);
				kfree(eve_data);
				is_need_to_unregister = true;
			}
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
			node_id, event);
		return -EINVAL;
	}

	return 0;
}
267
268 /**
269 * xlnx_register_event() - Register for the event.
270 * @cb_type: Type of callback from pm_api_cb_id,
271 * PM_NOTIFY_CB - for Error Events,
272 * PM_INIT_SUSPEND_CB - for suspend callback.
273 * @node_id: Node-Id related to event.
274 * @event: Event Mask for the Error Event.
275 * @wake: Flag specifying whether the subsystem should be woken upon
276 * event notification.
277 * @cb_fun: Function pointer to store the callback function.
278 * @data: Pointer for the driver instance.
279 *
280 * Return: Returns 0 on successful registration else error code.
281 */
xlnx_register_event(const enum pm_api_cb_id cb_type,const u32 node_id,const u32 event,const bool wake,event_cb_func_t cb_fun,void * data)282 int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
283 const bool wake, event_cb_func_t cb_fun, void *data)
284 {
285 int ret = 0;
286 u32 eve;
287 int pos;
288
289 if (event_manager_availability)
290 return event_manager_availability;
291
292 if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
293 pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
294 return -EINVAL;
295 }
296
297 if (!cb_fun)
298 return -EFAULT;
299
300 if (cb_type == PM_INIT_SUSPEND_CB) {
301 ret = xlnx_add_cb_for_suspend(cb_fun, data);
302 } else {
303 if (!xlnx_is_error_event(node_id)) {
304 /* Add entry for Node-Id/Event in hash table */
305 ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
306 } else {
307 /* Add into Hash table */
308 for (pos = 0; pos < MAX_BITS; pos++) {
309 eve = event & (1 << pos);
310 if (!eve)
311 continue;
312
313 /* Add entry for Node-Id/Eve in hash table */
314 ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
315 data);
316 /* Break the loop if got error */
317 if (ret)
318 break;
319 }
320 if (ret) {
321 /* Skip the Event for which got the error */
322 pos--;
323 /* Remove registered(during this call) event from hash table */
324 for ( ; pos >= 0; pos--) {
325 eve = event & (1 << pos);
326 if (!eve)
327 continue;
328 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
329 }
330 }
331 }
332
333 if (ret) {
334 pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
335 event, ret);
336 return ret;
337 }
338
339 /* Register for Node-Id/Event combination in firmware */
340 ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
341 if (ret) {
342 pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
343 event, ret);
344 /* Remove already registered event from hash table */
345 if (xlnx_is_error_event(node_id)) {
346 for (pos = 0; pos < MAX_BITS; pos++) {
347 eve = event & (1 << pos);
348 if (!eve)
349 continue;
350 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
351 }
352 } else {
353 xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
354 }
355 return ret;
356 }
357 }
358
359 return ret;
360 }
361 EXPORT_SYMBOL_GPL(xlnx_register_event);
362
363 /**
364 * xlnx_unregister_event() - Unregister for the event.
365 * @cb_type: Type of callback from pm_api_cb_id,
366 * PM_NOTIFY_CB - for Error Events,
367 * PM_INIT_SUSPEND_CB - for suspend callback.
368 * @node_id: Node-Id related to event.
369 * @event: Event Mask for the Error Event.
370 * @cb_fun: Function pointer of callback function.
371 * @data: Pointer of agent's private data.
372 *
373 * Return: Returns 0 on successful unregistration else error code.
374 */
xlnx_unregister_event(const enum pm_api_cb_id cb_type,const u32 node_id,const u32 event,event_cb_func_t cb_fun,void * data)375 int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
376 event_cb_func_t cb_fun, void *data)
377 {
378 int ret = 0;
379 u32 eve, pos;
380
381 is_need_to_unregister = false;
382
383 if (event_manager_availability)
384 return event_manager_availability;
385
386 if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
387 pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
388 return -EINVAL;
389 }
390
391 if (!cb_fun)
392 return -EFAULT;
393
394 if (cb_type == PM_INIT_SUSPEND_CB) {
395 ret = xlnx_remove_cb_for_suspend(cb_fun);
396 } else {
397 /* Remove Node-Id/Event from hash table */
398 if (!xlnx_is_error_event(node_id)) {
399 xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
400 } else {
401 for (pos = 0; pos < MAX_BITS; pos++) {
402 eve = event & (1 << pos);
403 if (!eve)
404 continue;
405
406 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
407 }
408 }
409
410 /* Un-register if list is empty */
411 if (is_need_to_unregister) {
412 /* Un-register for Node-Id/Event combination */
413 ret = zynqmp_pm_register_notifier(node_id, event, false, false);
414 if (ret) {
415 pr_err("%s() failed for 0x%x and 0x%x: %d\n",
416 __func__, node_id, event, ret);
417 return ret;
418 }
419 }
420 }
421
422 return ret;
423 }
424 EXPORT_SYMBOL_GPL(xlnx_unregister_event);
425
xlnx_call_suspend_cb_handler(const u32 * payload)426 static void xlnx_call_suspend_cb_handler(const u32 *payload)
427 {
428 bool is_callback_found = false;
429 struct registered_event_data *eve_data;
430 u32 cb_type = payload[0];
431 struct agent_cb *cb_pos;
432 struct agent_cb *cb_next;
433
434 /* Check for existing entry in hash table for given cb_type */
435 hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
436 if (eve_data->cb_type == cb_type) {
437 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
438 cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
439 is_callback_found = true;
440 }
441 }
442 }
443 if (!is_callback_found)
444 pr_warn("Didn't find any registered callback for suspend event\n");
445 }
446
/**
 * xlnx_call_notify_cb_handler() - Dispatch a PM_NOTIFY_CB payload.
 * @payload:	Firmware payload; payload[1] is the Node-Id, payload[2]
 *		the event mask (a single bit for error events).
 *
 * Looks up the Node-Id/Event hash entry, invokes every registered
 * callback, then re-registers with firmware to keep receiving events.
 * On re-registration failure the entry's callbacks are removed again.
 */
static void xlnx_call_notify_cb_handler(const u32 *payload)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
				is_callback_found = true;
			}

			/* re register with firmware to get future events */
			ret = zynqmp_pm_register_notifier(payload[1], payload[2],
							  eve_data->wake, true);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
				       payload[1], payload[2], ret);
				/*
				 * NOTE(review): xlnx_remove_cb_for_notify_event()
				 * frees eve_data once its list empties, while this
				 * loop is still walking that list — confirm the
				 * final iteration cannot read freed memory.
				 */
				list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
							 list) {
					/* Remove already registered event from hash table */
					xlnx_remove_cb_for_notify_event(payload[1], payload[2],
									cb_pos->eve_cb,
									cb_pos->agent_data);
				}
			}
		}
	}
	if (!is_callback_found)
		pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n",
			payload[1], payload[2]);
}
484
/* Fetch the pending callback payload (cb_type plus arguments) from firmware. */
static void xlnx_get_event_callback_data(u32 *buf)
{
	zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
}
489
xlnx_event_handler(int irq,void * dev_id)490 static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
491 {
492 u32 cb_type, node_id, event, pos;
493 u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
494 u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
495
496 /* Get event data */
497 xlnx_get_event_callback_data(payload);
498
499 /* First element is callback type, others are callback arguments */
500 cb_type = payload[0];
501
502 if (cb_type == PM_NOTIFY_CB) {
503 node_id = payload[1];
504 event = payload[2];
505 if (!xlnx_is_error_event(node_id)) {
506 xlnx_call_notify_cb_handler(payload);
507 } else {
508 /*
509 * Each call back function expecting payload as an input arguments.
510 * We can get multiple error events as in one call back through error
511 * mask. So payload[2] may can contain multiple error events.
512 * In reg_driver_map database we store data in the combination of single
513 * node_id-error combination.
514 * So coping the payload message into event_data and update the
515 * event_data[2] with Error Mask for single error event and use
516 * event_data as input argument for registered call back function.
517 *
518 */
519 memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
520 /* Support Multiple Error Event */
521 for (pos = 0; pos < MAX_BITS; pos++) {
522 if ((0 == (event & (1 << pos))))
523 continue;
524 event_data[2] = (event & (1 << pos));
525 xlnx_call_notify_cb_handler(event_data);
526 }
527 }
528 } else if (cb_type == PM_INIT_SUSPEND_CB) {
529 xlnx_call_suspend_cb_handler(payload);
530 } else {
531 pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
532 }
533
534 return IRQ_HANDLED;
535 }
536
/* CPU hotplug online callback: enable the per-CPU SGI on this CPU. */
static int xlnx_event_cpuhp_start(unsigned int cpu)
{
	enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);

	return 0;
}
543
/* CPU hotplug teardown callback: disable the per-CPU SGI on this CPU. */
static int xlnx_event_cpuhp_down(unsigned int cpu)
{
	disable_percpu_irq(virq_sgi);

	return 0;
}
550
/* on_each_cpu() helper to disable the per-CPU SGI; @data is unused. */
static void xlnx_disable_percpu_irq(void *data)
{
	disable_percpu_irq(virq_sgi);
}
555
xlnx_event_init_sgi(struct platform_device * pdev)556 static int xlnx_event_init_sgi(struct platform_device *pdev)
557 {
558 int ret = 0;
559 /*
560 * IRQ related structures are used for the following:
561 * for each SGI interrupt ensure its mapped by GIC IRQ domain
562 * and that each corresponding linux IRQ for the HW IRQ has
563 * a handler for when receiving an interrupt from the remote
564 * processor.
565 */
566 struct irq_domain *domain;
567 struct irq_fwspec sgi_fwspec;
568 struct device_node *interrupt_parent = NULL;
569 struct device *parent = pdev->dev.parent;
570
571 /* Find GIC controller to map SGIs. */
572 interrupt_parent = of_irq_find_parent(parent->of_node);
573 if (!interrupt_parent) {
574 dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
575 return -EINVAL;
576 }
577
578 /* Each SGI needs to be associated with GIC's IRQ domain. */
579 domain = irq_find_host(interrupt_parent);
580 of_node_put(interrupt_parent);
581
582 /* Each mapping needs GIC domain when finding IRQ mapping. */
583 sgi_fwspec.fwnode = domain->fwnode;
584
585 /*
586 * When irq domain looks at mapping each arg is as follows:
587 * 3 args for: interrupt type (SGI), interrupt # (set later), type
588 */
589 sgi_fwspec.param_count = 1;
590
591 /* Set SGI's hwirq */
592 sgi_fwspec.param[0] = sgi_num;
593 virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
594
595 ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
596 &dummy_cpu_number);
597
598 WARN_ON(ret);
599 if (ret) {
600 irq_dispose_mapping(virq_sgi);
601 return ret;
602 }
603
604 irq_to_desc(virq_sgi);
605 irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
606
607 return ret;
608 }
609
/*
 * Tear down what xlnx_event_init_sgi()/probe set up: remove the CPU
 * hotplug state, disable the per-CPU IRQ on every CPU, then free the
 * per-CPU IRQ and dispose of its domain mapping.
 */
static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
	/*
	 * NOTE(review): removes the dynamic cpuhp state by the generic
	 * CPUHP_AP_ONLINE_DYN id rather than the instance returned by
	 * cpuhp_setup_state() in probe — confirm this targets our state.
	 */
	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);

	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);

	irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
	free_percpu_irq(virq_sgi, &dummy_cpu_number);
	irq_dispose_mapping(virq_sgi);
}
620
/**
 * xlnx_event_manager_probe() - Probe the Xilinx event manager.
 * @pdev:	Platform device.
 *
 * Verifies the firmware supports PM_REGISTER_NOTIFIER at the required
 * version, sets up the SGI and CPU hotplug callbacks, registers the SGI
 * with TF-A and finally marks the event manager available.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int xlnx_event_manager_probe(struct platform_device *pdev)
{
	int ret;

	ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
	if (ret < 0) {
		dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
		return ret;
	}

	/* The low 16 bits of the feature-check result carry the FW version */
	if ((ret & FIRMWARE_VERSION_MASK) <
	    REGISTER_NOTIFIER_FIRMWARE_VERSION) {
		dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
			REGISTER_NOTIFIER_FIRMWARE_VERSION,
			ret & FIRMWARE_VERSION_MASK);
		return -EOPNOTSUPP;
	}

	/* Initialize the SGI */
	ret = xlnx_event_init_sgi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret);
		return ret;
	}

	/*
	 * Setup function for the CPU hot-plug cases
	 * NOTE(review): the returned dynamic state id is discarded; cleanup
	 * removes CPUHP_AP_ONLINE_DYN directly — confirm intended.
	 */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
			  xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);

	ret = zynqmp_pm_register_sgi(sgi_num, 0);
	if (ret) {
		dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
		xlnx_event_cleanup_sgi(pdev);
		return ret;
	}

	/* Event manager is now usable by xlnx_register_event() callers */
	event_manager_availability = 0;

	dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
	dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");

	return ret;
}
664
/**
 * xlnx_event_manager_remove() - Remove the event manager.
 * @pdev:	Platform device.
 *
 * Frees every registered hash entry and its callbacks, unregisters the
 * SGI from TF-A, tears down the SGI and marks the manager unavailable.
 */
static void xlnx_event_manager_remove(struct platform_device *pdev)
{
	int i;
	struct registered_event_data *eve_data;
	struct hlist_node *tmp;
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Drop all registered callbacks and their hash entries */
	hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			list_del_init(&cb_pos->list);
			kfree(cb_pos);
		}
		hash_del(&eve_data->hentry);
		kfree(eve_data);
	}

	/* Per the error message below, the (0, 1) call unregisters the SGI */
	ret = zynqmp_pm_register_sgi(0, 1);
	if (ret)
		dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);

	xlnx_event_cleanup_sgi(pdev);

	event_manager_availability = -EACCES;
}
691
static struct platform_driver xlnx_event_manager_driver = {
	.probe = xlnx_event_manager_probe,
	.remove_new = xlnx_event_manager_remove,
	.driver = {
		.name = "xlnx_event_manager",
	},
};
/*
 * NOTE(review): sgi_num is declared as int but exposed as a uint module
 * parameter — confirm the module_param type matches the declaration.
 */
module_param(sgi_num, uint, 0);
module_platform_driver(xlnx_event_manager_driver);
701