// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx Event Management Driver
 *
 * Copyright (C) 2021 Xilinx, Inc.
 * Copyright (C) 2024 Advanced Micro Devices, Inc.
 *
 * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
 */
10
11 #include <linux/cpuhotplug.h>
12 #include <linux/firmware/xlnx-event-manager.h>
13 #include <linux/firmware/xlnx-zynqmp.h>
14 #include <linux/hashtable.h>
15 #include <linux/interrupt.h>
16 #include <linux/irq.h>
17 #include <linux/irqdomain.h>
18 #include <linux/module.h>
19 #include <linux/of_irq.h>
20 #include <linux/platform_device.h>
21 #include <linux/slab.h>
22
/* Per-cpu cookie handed to request_percpu_irq()/free_percpu_irq(); the value
 * itself is never read, it only provides a valid per-cpu dev_id.
 */
static DEFINE_PER_CPU_READ_MOSTLY(int, dummy_cpu_number);

/* Linux virq mapped to the firmware-notification SGI (set in init_sgi) */
static int virq_sgi;
/* -EACCES until probe succeeds; 0 afterwards. Gates the public register/
 * unregister API.
 */
static int event_manager_availability = -EACCES;

/* SGI number used for Event management driver */
#define XLNX_EVENT_SGI_NUM (15)

/* Max number of driver can register for same event */
#define MAX_DRIVER_PER_EVENT (10U)

/* Max HashMap Order for PM API feature check (1<<7 = 128) */
#define REGISTERED_DRIVER_MAX_ORDER (7)

#define MAX_BITS (32U) /* Number of bits available for error mask */

#define FIRMWARE_VERSION_MASK (0xFFFFU)
#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)

/* Maps key (Node-Id | Event-Id) -> struct registered_event_data */
static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
/* SGI to request from TF-A; overridable via the "sgi_num" module parameter */
static int sgi_num = XLNX_EVENT_SGI_NUM;

/* Set by the remove-callback helpers when a hash entry was actually freed,
 * signalling xlnx_unregister_event() to also unregister with firmware.
 */
static bool is_need_to_unregister;
46
/**
 * struct agent_cb - Registered callback function and private data.
 * @agent_data:	Data passed back to handler function.
 * @eve_cb:	Function pointer to store the callback function.
 * @list:	member to create list (linked into a
 *		registered_event_data's cb_list_head).
 */
struct agent_cb {
	void *agent_data;
	event_cb_func_t eve_cb;
	struct list_head list;
};
58
/**
 * struct registered_event_data - Registered Event Data.
 * @key:	key is the combine id(Node-Id | Event-Id) of type u64
 *		where upper u32 for Node-Id and lower u32 for Event-Id,
 *		And this used as key to index into hashmap.
 *		(0 for the PM_INIT_SUSPEND_CB entry, which is keyed by
 *		cb_type instead.)
 * @cb_type:	Type of Api callback, like PM_NOTIFY_CB, etc.
 * @wake:	If this flag set, firmware will wake up processor if is
 *		in sleep or power down state.
 * @cb_list_head: Head of call back data list which contain the information
 *		about registered handler and private data.
 * @hentry:	hlist_node that hooks this entry into hashtable.
 */
struct registered_event_data {
	u64 key;
	enum pm_api_cb_id cb_type;
	bool wake;
	struct list_head cb_list_head;
	struct hlist_node hentry;
};
78
xlnx_is_error_event(const u32 node_id)79 static bool xlnx_is_error_event(const u32 node_id)
80 {
81 if (node_id == EVENT_ERROR_PMC_ERR1 ||
82 node_id == EVENT_ERROR_PMC_ERR2 ||
83 node_id == EVENT_ERROR_PSM_ERR1 ||
84 node_id == EVENT_ERROR_PSM_ERR2)
85 return true;
86
87 return false;
88 }
89
/*
 * xlnx_add_cb_for_notify_event() - Register (cb_fun, data) for a
 * Node-Id/Event pair.
 *
 * Looks up the hash entry keyed by (node_id << 32 | event); creates it
 * (with @wake recorded) when absent, otherwise appends to the existing
 * callback list. A duplicate (cb_fun, data) pair on an existing entry is
 * silently accepted and returns 0 without adding a second copy.
 *
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
					event_cb_func_t cb_fun, void *data)
{
	u64 key = 0;
	bool present_in_hash = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	key = ((u64)node_id << 32U) | (u64)event;
	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			present_in_hash = true;
			break;
		}
	}

	if (!present_in_hash) {
		/* Add new entry if not present in HASH table */
		eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
		if (!eve_data)
			return -ENOMEM;
		eve_data->key = key;
		eve_data->cb_type = PM_NOTIFY_CB;
		eve_data->wake = wake;
		INIT_LIST_HEAD(&eve_data->cb_list_head);

		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data) {
			kfree(eve_data);
			return -ENOMEM;
		}
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		/* Add into callback list */
		list_add(&cb_data->list, &eve_data->cb_list_head);

		/* Add into HASH table */
		hash_add(reg_driver_map, &eve_data->hentry, key);
	} else {
		/* Search for callback function and private data in list */
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			if (cb_pos->eve_cb == cb_fun &&
			    cb_pos->agent_data == data) {
				/* Already registered; treat as success */
				return 0;
			}
		}

		/* Add multiple handler and private data in list */
		cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
		if (!cb_data)
			return -ENOMEM;
		cb_data->eve_cb = cb_fun;
		cb_data->agent_data = data;

		list_add(&cb_data->list, &eve_data->cb_list_head);
	}

	return 0;
}
153
/*
 * xlnx_add_cb_for_suspend() - Register the (single) suspend callback.
 *
 * The suspend entry is keyed in the hashtable by the PM_INIT_SUSPEND_CB
 * constant rather than a Node-Id/Event key. Only one registration is
 * allowed at a time.
 *
 * Return: 0 on success, -EINVAL if already registered, -ENOMEM on
 * allocation failure.
 */
static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
{
	struct registered_event_data *eve_data;
	struct agent_cb *cb_data;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			pr_err("Found as already registered\n");
			return -EINVAL;
		}
	}

	/* Add new entry if not present */
	eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
	if (!eve_data)
		return -ENOMEM;

	eve_data->key = 0;
	eve_data->cb_type = PM_INIT_SUSPEND_CB;
	INIT_LIST_HEAD(&eve_data->cb_list_head);

	cb_data = kmalloc(sizeof(*cb_data), GFP_KERNEL);
	if (!cb_data) {
		kfree(eve_data);
		return -ENOMEM;
	}
	cb_data->eve_cb = cb_fun;
	cb_data->agent_data = data;

	/* Add into callback list */
	list_add(&cb_data->list, &eve_data->cb_list_head);

	hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);

	return 0;
}
191
/*
 * xlnx_remove_cb_for_suspend() - Remove a previously registered suspend
 * callback and free its hash entry.
 *
 * Sets the file-scope is_need_to_unregister flag when an entry was freed.
 *
 * Return: 0 when @cb_fun was found and removed, -EINVAL otherwise.
 */
static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given cb_type */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, PM_INIT_SUSPEND_CB) {
		if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}
			/* remove an object from a hashtable */
			hash_del(&eve_data->hentry);
			kfree(eve_data);
			is_need_to_unregister = true;
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for suspend event\n");
		return -EINVAL;
	}

	return 0;
}
226
/*
 * xlnx_remove_cb_for_notify_event() - Remove (cb_fun, data) for a
 * Node-Id/Event pair; free the hash entry when its callback list empties.
 *
 * Sets the file-scope is_need_to_unregister flag when the entry was freed,
 * so the caller knows to also unregister with firmware.
 *
 * Return: 0 when the callback was found and removed, -EINVAL otherwise.
 */
static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
					   event_cb_func_t cb_fun, void *data)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)node_id << 32U) | (u64)event;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;
	struct hlist_node *tmp;

	is_need_to_unregister = false;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible_safe(reg_driver_map, eve_data, tmp, hentry, key) {
		if (eve_data->key == key) {
			/* Delete the list of callback */
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				if (cb_pos->eve_cb == cb_fun &&
				    cb_pos->agent_data == data) {
					is_callback_found = true;
					list_del_init(&cb_pos->list);
					kfree(cb_pos);
				}
			}

			/* Remove HASH table if callback list is empty */
			if (list_empty(&eve_data->cb_list_head)) {
				/* remove an object from a HASH table */
				hash_del(&eve_data->hentry);
				kfree(eve_data);
				is_need_to_unregister = true;
			}
		}
	}
	if (!is_callback_found) {
		pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
			node_id, event);
		return -EINVAL;
	}

	return 0;
}
269
270 /**
271 * xlnx_register_event() - Register for the event.
272 * @cb_type: Type of callback from pm_api_cb_id,
273 * PM_NOTIFY_CB - for Error Events,
274 * PM_INIT_SUSPEND_CB - for suspend callback.
275 * @node_id: Node-Id related to event.
276 * @event: Event Mask for the Error Event.
277 * @wake: Flag specifying whether the subsystem should be woken upon
278 * event notification.
279 * @cb_fun: Function pointer to store the callback function.
280 * @data: Pointer for the driver instance.
281 *
282 * Return: Returns 0 on successful registration else error code.
283 */
xlnx_register_event(const enum pm_api_cb_id cb_type,const u32 node_id,const u32 event,const bool wake,event_cb_func_t cb_fun,void * data)284 int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
285 const bool wake, event_cb_func_t cb_fun, void *data)
286 {
287 int ret = 0;
288 u32 eve;
289 int pos;
290
291 if (event_manager_availability)
292 return event_manager_availability;
293
294 if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
295 pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
296 return -EINVAL;
297 }
298
299 if (!cb_fun)
300 return -EFAULT;
301
302 if (cb_type == PM_INIT_SUSPEND_CB) {
303 ret = xlnx_add_cb_for_suspend(cb_fun, data);
304 } else {
305 if (!xlnx_is_error_event(node_id)) {
306 /* Add entry for Node-Id/Event in hash table */
307 ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
308 } else {
309 /* Add into Hash table */
310 for (pos = 0; pos < MAX_BITS; pos++) {
311 eve = event & (1 << pos);
312 if (!eve)
313 continue;
314
315 /* Add entry for Node-Id/Eve in hash table */
316 ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
317 data);
318 /* Break the loop if got error */
319 if (ret)
320 break;
321 }
322 if (ret) {
323 /* Skip the Event for which got the error */
324 pos--;
325 /* Remove registered(during this call) event from hash table */
326 for ( ; pos >= 0; pos--) {
327 eve = event & (1 << pos);
328 if (!eve)
329 continue;
330 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
331 }
332 }
333 }
334
335 if (ret) {
336 pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
337 event, ret);
338 return ret;
339 }
340
341 /* Register for Node-Id/Event combination in firmware */
342 ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
343 if (ret) {
344 pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
345 event, ret);
346 /* Remove already registered event from hash table */
347 if (xlnx_is_error_event(node_id)) {
348 for (pos = 0; pos < MAX_BITS; pos++) {
349 eve = event & (1 << pos);
350 if (!eve)
351 continue;
352 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
353 }
354 } else {
355 xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
356 }
357 return ret;
358 }
359 }
360
361 return ret;
362 }
363 EXPORT_SYMBOL_GPL(xlnx_register_event);
364
365 /**
366 * xlnx_unregister_event() - Unregister for the event.
367 * @cb_type: Type of callback from pm_api_cb_id,
368 * PM_NOTIFY_CB - for Error Events,
369 * PM_INIT_SUSPEND_CB - for suspend callback.
370 * @node_id: Node-Id related to event.
371 * @event: Event Mask for the Error Event.
372 * @cb_fun: Function pointer of callback function.
373 * @data: Pointer of agent's private data.
374 *
375 * Return: Returns 0 on successful unregistration else error code.
376 */
xlnx_unregister_event(const enum pm_api_cb_id cb_type,const u32 node_id,const u32 event,event_cb_func_t cb_fun,void * data)377 int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
378 event_cb_func_t cb_fun, void *data)
379 {
380 int ret = 0;
381 u32 eve, pos;
382
383 is_need_to_unregister = false;
384
385 if (event_manager_availability)
386 return event_manager_availability;
387
388 if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
389 pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
390 return -EINVAL;
391 }
392
393 if (!cb_fun)
394 return -EFAULT;
395
396 if (cb_type == PM_INIT_SUSPEND_CB) {
397 ret = xlnx_remove_cb_for_suspend(cb_fun);
398 } else {
399 /* Remove Node-Id/Event from hash table */
400 if (!xlnx_is_error_event(node_id)) {
401 xlnx_remove_cb_for_notify_event(node_id, event, cb_fun, data);
402 } else {
403 for (pos = 0; pos < MAX_BITS; pos++) {
404 eve = event & (1 << pos);
405 if (!eve)
406 continue;
407
408 xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun, data);
409 }
410 }
411
412 /* Un-register if list is empty */
413 if (is_need_to_unregister) {
414 /* Un-register for Node-Id/Event combination */
415 ret = zynqmp_pm_register_notifier(node_id, event, false, false);
416 if (ret) {
417 pr_err("%s() failed for 0x%x and 0x%x: %d\n",
418 __func__, node_id, event, ret);
419 return ret;
420 }
421 }
422 }
423
424 return ret;
425 }
426 EXPORT_SYMBOL_GPL(xlnx_unregister_event);
427
xlnx_call_suspend_cb_handler(const u32 * payload)428 static void xlnx_call_suspend_cb_handler(const u32 *payload)
429 {
430 bool is_callback_found = false;
431 struct registered_event_data *eve_data;
432 u32 cb_type = payload[0];
433 struct agent_cb *cb_pos;
434 struct agent_cb *cb_next;
435
436 /* Check for existing entry in hash table for given cb_type */
437 hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
438 if (eve_data->cb_type == cb_type) {
439 list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
440 cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
441 is_callback_found = true;
442 }
443 }
444 }
445 if (!is_callback_found)
446 pr_warn("Didn't find any registered callback for suspend event\n");
447 }
448
/*
 * xlnx_call_notify_cb_handler() - Dispatch a PM_NOTIFY_CB payload to every
 * callback registered for the (payload[1], payload[2]) Node-Id/Event key.
 *
 * Firmware notifications are one-shot, so after dispatching the entry is
 * re-registered with firmware; when that re-registration fails, all local
 * callbacks for the key are dropped so stale entries don't linger.
 */
static void xlnx_call_notify_cb_handler(const u32 *payload)
{
	bool is_callback_found = false;
	struct registered_event_data *eve_data;
	u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Check for existing entry in hash table for given key id */
	hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
		if (eve_data->key == key) {
			list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
				cb_pos->eve_cb(&payload[0], cb_pos->agent_data);
				is_callback_found = true;
			}

			/* re register with firmware to get future events */
			ret = zynqmp_pm_register_notifier(payload[1], payload[2],
							  eve_data->wake, true);
			if (ret) {
				pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
				       payload[1], payload[2], ret);
				list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head,
							 list) {
					/* Remove already registered event from hash table */
					xlnx_remove_cb_for_notify_event(payload[1], payload[2],
									cb_pos->eve_cb,
									cb_pos->agent_data);
				}
			}
		}
	}
	if (!is_callback_found)
		pr_warn("Unhandled SGI node 0x%x event 0x%x. Expected with Xen hypervisor\n",
			payload[1], payload[2]);
}
486
/* Fetch the pending callback payload (type + arguments) from firmware
 * into @buf via the GET_CALLBACK_DATA EEMI call.
 */
static void xlnx_get_event_callback_data(u32 *buf)
{
	zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
}
491
xlnx_event_handler(int irq,void * dev_id)492 static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
493 {
494 u32 cb_type, node_id, event, pos;
495 u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
496 u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
497
498 /* Get event data */
499 xlnx_get_event_callback_data(payload);
500
501 /* First element is callback type, others are callback arguments */
502 cb_type = payload[0];
503
504 if (cb_type == PM_NOTIFY_CB) {
505 node_id = payload[1];
506 event = payload[2];
507 if (!xlnx_is_error_event(node_id)) {
508 xlnx_call_notify_cb_handler(payload);
509 } else {
510 /*
511 * Each call back function expecting payload as an input arguments.
512 * We can get multiple error events as in one call back through error
513 * mask. So payload[2] may can contain multiple error events.
514 * In reg_driver_map database we store data in the combination of single
515 * node_id-error combination.
516 * So coping the payload message into event_data and update the
517 * event_data[2] with Error Mask for single error event and use
518 * event_data as input argument for registered call back function.
519 *
520 */
521 memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
522 /* Support Multiple Error Event */
523 for (pos = 0; pos < MAX_BITS; pos++) {
524 if ((0 == (event & (1 << pos))))
525 continue;
526 event_data[2] = (event & (1 << pos));
527 xlnx_call_notify_cb_handler(event_data);
528 }
529 }
530 } else if (cb_type == PM_INIT_SUSPEND_CB) {
531 xlnx_call_suspend_cb_handler(payload);
532 } else {
533 pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
534 }
535
536 return IRQ_HANDLED;
537 }
538
/* CPU hotplug "online" callback: enable the SGI on the CPU coming up */
static int xlnx_event_cpuhp_start(unsigned int cpu)
{
	enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);

	return 0;
}
545
/* CPU hotplug "offline" callback: disable the SGI on the CPU going down */
static int xlnx_event_cpuhp_down(unsigned int cpu)
{
	disable_percpu_irq(virq_sgi);

	return 0;
}
552
/* on_each_cpu() helper: disable the SGI on the executing CPU */
static void xlnx_disable_percpu_irq(void *data)
{
	disable_percpu_irq(virq_sgi);
}
557
xlnx_event_init_sgi(struct platform_device * pdev)558 static int xlnx_event_init_sgi(struct platform_device *pdev)
559 {
560 int ret = 0;
561 /*
562 * IRQ related structures are used for the following:
563 * for each SGI interrupt ensure its mapped by GIC IRQ domain
564 * and that each corresponding linux IRQ for the HW IRQ has
565 * a handler for when receiving an interrupt from the remote
566 * processor.
567 */
568 struct irq_domain *domain;
569 struct irq_fwspec sgi_fwspec;
570 struct device_node *interrupt_parent = NULL;
571 struct device *parent = pdev->dev.parent;
572
573 /* Find GIC controller to map SGIs. */
574 interrupt_parent = of_irq_find_parent(parent->of_node);
575 if (!interrupt_parent) {
576 dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
577 return -EINVAL;
578 }
579
580 /* Each SGI needs to be associated with GIC's IRQ domain. */
581 domain = irq_find_host(interrupt_parent);
582 of_node_put(interrupt_parent);
583
584 /* Each mapping needs GIC domain when finding IRQ mapping. */
585 sgi_fwspec.fwnode = domain->fwnode;
586
587 /*
588 * When irq domain looks at mapping each arg is as follows:
589 * 3 args for: interrupt type (SGI), interrupt # (set later), type
590 */
591 sgi_fwspec.param_count = 1;
592
593 /* Set SGI's hwirq */
594 sgi_fwspec.param[0] = sgi_num;
595 virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
596
597 ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
598 &dummy_cpu_number);
599
600 WARN_ON(ret);
601 if (ret) {
602 irq_dispose_mapping(virq_sgi);
603 return ret;
604 }
605
606 irq_to_desc(virq_sgi);
607 irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
608
609 return ret;
610 }
611
/*
 * xlnx_event_cleanup_sgi() - Undo xlnx_event_init_sgi(): remove the hotplug
 * state, disable the SGI on every CPU, then free and unmap it.
 *
 * NOTE(review): cpuhp_remove_state(CPUHP_AP_ONLINE_DYN) removes whatever
 * dynamic state occupies the first dynamic slot; the state value returned
 * by cpuhp_setup_state() in probe is not stored — confirm this is intended.
 */
static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
{
	cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);

	on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);

	irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
	free_percpu_irq(virq_sgi, &dummy_cpu_number);
	irq_dispose_mapping(virq_sgi);
}
622
/*
 * xlnx_event_manager_probe() - Verify firmware support, set up the SGI and
 * CPU hotplug callbacks, and register the SGI with TF-A. On success the
 * module-wide event_manager_availability gate is opened (set to 0).
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int xlnx_event_manager_probe(struct platform_device *pdev)
{
	int ret;

	/* Feature check returns the firmware API version on success */
	ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
	if (ret < 0) {
		dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
		return ret;
	}

	if ((ret & FIRMWARE_VERSION_MASK) <
	    REGISTER_NOTIFIER_FIRMWARE_VERSION) {
		dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
			REGISTER_NOTIFIER_FIRMWARE_VERSION,
			ret & FIRMWARE_VERSION_MASK);
		return -EOPNOTSUPP;
	}

	/* Initialize the SGI */
	ret = xlnx_event_init_sgi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret);
		return ret;
	}

	/* Setup function for the CPU hot-plug cases */
	/* NOTE(review): return value (dynamic state or error) is ignored;
	 * on failure the per-cpu IRQ would never be enabled — confirm.
	 */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
			  xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);

	ret = zynqmp_pm_register_sgi(sgi_num, 0);
	if (ret) {
		dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
		xlnx_event_cleanup_sgi(pdev);
		return ret;
	}

	/* Open the gate for xlnx_register_event()/xlnx_unregister_event() */
	event_manager_availability = 0;

	dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
	dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");

	return ret;
}
666
/*
 * xlnx_event_manager_remove() - Free every registered callback and hash
 * entry, unregister the SGI with TF-A, tear down the SGI, and close the
 * availability gate again.
 */
static void xlnx_event_manager_remove(struct platform_device *pdev)
{
	int i;
	struct registered_event_data *eve_data;
	struct hlist_node *tmp;
	int ret;
	struct agent_cb *cb_pos;
	struct agent_cb *cb_next;

	/* Drain all entries; _safe variants allow freeing while iterating */
	hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
		list_for_each_entry_safe(cb_pos, cb_next, &eve_data->cb_list_head, list) {
			list_del_init(&cb_pos->list);
			kfree(cb_pos);
		}
		hash_del(&eve_data->hentry);
		kfree(eve_data);
	}

	/* reset=1 asks TF-A to unregister the SGI */
	ret = zynqmp_pm_register_sgi(0, 1);
	if (ret)
		dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);

	xlnx_event_cleanup_sgi(pdev);

	event_manager_availability = -EACCES;
}
693
static struct platform_driver xlnx_event_manager_driver = {
	.probe = xlnx_event_manager_probe,
	.remove_new = xlnx_event_manager_remove,
	.driver = {
		.name = "xlnx_event_manager",
	},
};
/* Allow overriding the SGI number at load: xlnx_event_manager.sgi_num=N */
module_param(sgi_num, uint, 0);
module_platform_driver(xlnx_event_manager_driver);
703