// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

/*
 * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
 * for communicating single bit state information to remote processors.
 *
 * The implementation is based on two sections of shared memory; the first
 * holding the state bits and the second holding a matrix of subscription bits.
 *
 * The state bits are structured in entries of 32 bits, each belonging to one
 * system in the SoC. The entry belonging to the local system is considered
 * read-write, while the rest should be considered read-only.
 *
 * The subscription matrix consists of N bitmaps per entry, denoting interest
 * in updates of the entry for each of the N hosts. Upon updating a state bit
 * each host's subscription bitmap should be queried and the remote system
 * should be interrupted if they request so.
 *
 * The subscription matrix is laid out in entry-major order:
 *	entry0: [host0 ... hostN]
 *	.
 *	.
 *	entryM: [host0 ... hostN]
 *
 * A third, optional, shared memory region might contain information regarding
 * the number of entries in the state bitmap as well as number of columns in
 * the subscription matrix.
 */
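
/*
 * In terms of the shared memory items below, the subscription bit for a given
 * (entry, host) pair therefore lives in the u32 word at index
 * (entry * num_hosts + host) of the interrupt mask region.
 */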

/*
 * Shared memory identifiers, used to acquire handles to respective memory
 * region.
 */
#define SMEM_SMSM_SHARED_STATE		85
#define SMEM_SMSM_CPU_INTR_MASK		333
#define SMEM_SMSM_SIZE_INFO		419

/*
 * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
 */
#define SMSM_DEFAULT_NUM_ENTRIES	8
#define SMSM_DEFAULT_NUM_HOSTS		3

struct smsm_entry;
struct smsm_host;

/**
 * struct qcom_smsm - smsm driver context
 * @dev: smsm device pointer
 * @local_host: column in the subscription matrix representing this system
 * @num_hosts: number of columns in the subscription matrix
 * @num_entries: number of entries in the state map and rows in the subscription
 *		matrix
 * @local_state: pointer to the local processor's state bits
 * @subscription: pointer to local processor's row in subscription matrix
 * @state: smem state handle
 * @lock: spinlock for read-modify-write of the outgoing state
 * @entries: context for each of the entries
 * @hosts: context for each of the hosts
 */
struct qcom_smsm {
	struct device *dev;

	u32 local_host;

	u32 num_hosts;
	u32 num_entries;

	u32 *local_state;
	u32 *subscription;
	struct qcom_smem_state *state;

	spinlock_t lock;

	struct smsm_entry *entries;
	struct smsm_host *hosts;
};

/**
 * struct smsm_entry - per remote processor entry context
 * @smsm: back-reference to driver context
 * @domain: IRQ domain for this entry, if representing a remote system
 * @irq_enabled: bitmap of which state bits IRQs are enabled
 * @irq_rising: bitmap tracking if rising bits should be propagated
 * @irq_falling: bitmap tracking if falling bits should be propagated
 * @last_value: snapshot of state bits last time the interrupts were propagated
 * @remote_state: pointer to this entry's state bits
 * @subscription: pointer to a row in the subscription matrix representing this
 *		entry
 */
struct smsm_entry {
	struct qcom_smsm *smsm;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);
	unsigned long last_value;

	u32 *remote_state;
	u32 *subscription;
};

/**
 * struct smsm_host - representation of a remote host
 * @ipc_regmap: regmap for outgoing interrupt
 * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
 * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
 */
struct smsm_host {
	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;
};

/**
 * smsm_update_bits() - change bit in outgoing entry and inform subscribers
 * @data: smsm context pointer
 * @mask: value mask
 * @value: new value
 *
 * Used to set and clear the bits in the outgoing/local entry and inform
 * subscribers about the change.
 */
static int smsm_update_bits(void *data, u32 mask, u32 value)
{
	struct qcom_smsm *smsm = data;
	struct smsm_host *hostp;
	unsigned long flags;
	u32 changes;
	u32 host;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&smsm->lock, flags);

	/* Update the entry */
	val = orig = readl(smsm->local_state);
	val &= ~mask;
	val |= value;

	/* Don't signal if we didn't change the value */
	changes = val ^ orig;
	if (!changes) {
		spin_unlock_irqrestore(&smsm->lock, flags);
		goto done;
	}

	/* Write out the new value */
	writel(val, smsm->local_state);
	spin_unlock_irqrestore(&smsm->lock, flags);

	/* Make sure the value update is ordered before any kicks */
	wmb();

	/* Iterate over all hosts to check who wants a kick */
	for (host = 0; host < smsm->num_hosts; host++) {
		hostp = &smsm->hosts[host];

		val = readl(smsm->subscription + host);
		if (val & changes && hostp->ipc_regmap) {
			regmap_write(hostp->ipc_regmap,
				     hostp->ipc_offset,
				     BIT(hostp->ipc_bit));
		}
	}

done:
	return 0;
}

static const struct qcom_smem_state_ops smsm_state_ops = {
	.update_bits = smsm_update_bits,
};

/**
 * smsm_intr() - cascading IRQ handler for SMSM
 * @irq: unused
 * @data: entry related to this IRQ
 *
 * This function cascades an incoming interrupt from a remote system, based on
 * the state bits and configuration.
 */
static irqreturn_t smsm_intr(int irq, void *data)
{
	struct smsm_entry *entry = data;
	unsigned i;
	int irq_pin;
	u32 changed;
	u32 val;

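	/*
	 * Snapshot the remote state and atomically swap it into last_value;
	 * the previous value returned by xchg() tells us which bits changed
	 * since the last time the handler ran.
	 */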
	val = readl(entry->remote_state);
	changed = val ^ xchg(&entry->last_value, val);

	for_each_set_bit(i, entry->irq_enabled, 32) {
		if (!(changed & BIT(i)))
			continue;

		if (val & BIT(i)) {
			if (test_bit(i, entry->irq_rising)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		} else {
			if (test_bit(i, entry->irq_falling)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

/**
 * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be masked
 *
 * This un-subscribes the local CPU from interrupts upon changes to the defined
 * status bit. The bit is also cleared from cascading.
 */
static void smsm_mask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

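	/* Clear our interest in this bit so the remote stops kicking us for it */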
	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val &= ~BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}

	clear_bit(irq, entry->irq_enabled);
}

/**
 * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be unmasked
 *
 * This subscribes the local CPU to interrupts upon changes to the defined
 * status bit. The bit is also marked for cascading.
 */
static void smsm_unmask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	/* Make sure our last cached state is up-to-date */
	if (readl(entry->remote_state) & BIT(irq))
		set_bit(irq, &entry->last_value);
	else
		clear_bit(irq, &entry->last_value);

	set_bit(irq, entry->irq_enabled);

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val |= BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}
}

/**
 * smsm_set_irq_type() - updates the requested IRQ type for the cascading
 * @irqd: consumer interrupt handle
 * @type: requested flags
 */
static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static int smsm_get_irqchip_state(struct irq_data *irqd,
				  enum irqchip_irq_state which, bool *state)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	u32 val;

	if (which != IRQCHIP_STATE_LINE_LEVEL)
		return -EINVAL;

	val = readl(entry->remote_state);
	*state = !!(val & BIT(irq));

	return 0;
}

static struct irq_chip smsm_irq_chip = {
	.name = "smsm",
	.irq_mask = smsm_mask_irq,
	.irq_unmask = smsm_unmask_irq,
	.irq_set_type = smsm_set_irq_type,
	.irq_get_irqchip_state = smsm_get_irqchip_state,
};

/**
 * smsm_irq_map() - sets up a mapping for a cascaded IRQ
 * @d: IRQ domain representing an entry
 * @irq: IRQ to set up
 * @hw: unused
 */
static int smsm_irq_map(struct irq_domain *d,
			unsigned int irq,
			irq_hw_number_t hw)
{
	struct smsm_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);

	return 0;
}

static const struct irq_domain_ops smsm_irq_ops = {
	.map = smsm_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

/**
 * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
 * @smsm: smsm driver context
 * @host_id: index of the remote host to be resolved
 *
 * Parses device tree to acquire the information needed for sending the
 * outgoing interrupts to a remote host - identified by @host_id.
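 *
 * The property is expected to carry a syscon phandle followed by the register
 * offset and bit used for the outgoing interrupt, e.g. (illustrative values):
 *
 *	qcom,ipc-1 = <&apcs 8 13>;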
 */
static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
{
	struct device_node *syscon;
	struct device_node *node = smsm->dev->of_node;
	struct smsm_host *host = &smsm->hosts[host_id];
	char key[16];
	int ret;

	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
	syscon = of_parse_phandle(node, key, 0);
	if (!syscon)
		return 0;

	host->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(host->ipc_regmap))
		return PTR_ERR(host->ipc_regmap);

	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
	if (ret < 0) {
		dev_err(smsm->dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
	if (ret < 0) {
		dev_err(smsm->dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

/**
 * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
 * @smsm: smsm driver context
 * @entry: entry context to be set up
 * @node: dt node containing the entry's properties
 */
static int smsm_inbound_entry(struct qcom_smsm *smsm,
			      struct smsm_entry *entry,
			      struct device_node *node)
{
	int ret;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(smsm->dev, irq,
					NULL, smsm_intr,
					IRQF_ONESHOT,
					"smsm", (void *)entry);
	if (ret) {
		dev_err(smsm->dev, "failed to request interrupt\n");
		return ret;
	}

	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smsm->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * smsm_get_size_info() - parse the optional memory segment for sizes
 * @smsm: smsm driver context
 *
 * Attempt to acquire the number of hosts and entries from the optional shared
 * memory location. Not being able to find this segment should indicate that
 * we're on an older system where these values were hard coded to
 * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smsm_get_size_info(struct qcom_smsm *smsm)
{
	size_t size;
	struct {
		u32 num_hosts;
		u32 num_entries;
		u32 reserved0;
		u32 reserved1;
	} *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT)
		return dev_err_probe(smsm->dev, PTR_ERR(info),
				     "unable to retrieve smsm size info\n");
	else if (IS_ERR(info) || size != sizeof(*info)) {
		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
		return 0;
	}

	smsm->num_entries = info->num_entries;
	smsm->num_hosts = info->num_hosts;

	dev_dbg(smsm->dev,
		"found custom size of smsm: %d entries %d hosts\n",
		smsm->num_entries, smsm->num_hosts);

	return 0;
}

static int qcom_smsm_probe(struct platform_device *pdev)
{
	struct device_node *local_node;
	struct device_node *node;
	struct smsm_entry *entry;
	struct qcom_smsm *smsm;
	u32 *intr_mask;
	size_t size;
	u32 *states;
	u32 id;
	int ret;

	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
	if (!smsm)
		return -ENOMEM;
	smsm->dev = &pdev->dev;
	spin_lock_init(&smsm->lock);

	ret = smsm_get_size_info(smsm);
	if (ret)
		return ret;

	smsm->entries = devm_kcalloc(&pdev->dev,
				     smsm->num_entries,
				     sizeof(struct smsm_entry),
				     GFP_KERNEL);
	if (!smsm->entries)
		return -ENOMEM;

	smsm->hosts = devm_kcalloc(&pdev->dev,
				   smsm->num_hosts,
				   sizeof(struct smsm_host),
				   GFP_KERNEL);
	if (!smsm->hosts)
		return -ENOMEM;

	for_each_child_of_node(pdev->dev.of_node, local_node) {
		if (of_property_present(local_node, "#qcom,smem-state-cells"))
			break;
	}
	if (!local_node) {
		dev_err(&pdev->dev, "no state entry\n");
		return -EINVAL;
	}

	of_property_read_u32(pdev->dev.of_node,
			     "qcom,local-host",
			     &smsm->local_host);

	/* Parse the host properties */
	for (id = 0; id < smsm->num_hosts; id++) {
		ret = smsm_parse_ipc(smsm, id);
		if (ret < 0)
			goto out_put;
	}

	/* Acquire the main SMSM state vector */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
			      smsm->num_entries * sizeof(u32));
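	/* Tolerate -EEXIST: the item may already have been allocated by another user */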
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
		goto out_put;
	}

	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
	if (IS_ERR(states)) {
		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
		ret = PTR_ERR(states);
		goto out_put;
	}

	/* Acquire the list of interrupt mask vectors */
	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
		goto out_put;
	}

	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
	if (IS_ERR(intr_mask)) {
		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
		ret = PTR_ERR(intr_mask);
		goto out_put;
	}

	/* Setup the reference to the local state bits */
	smsm->local_state = states + smsm->local_host;
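	/* ... and to the local entry's row in the entry-major subscription matrix */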
	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;

	/* Register the outgoing state */
	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
	if (IS_ERR(smsm->state)) {
		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
		ret = PTR_ERR(smsm->state);
		goto out_put;
	}

	/* Register handlers for remote processor entries of interest. */
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		if (!of_property_read_bool(node, "interrupt-controller"))
			continue;

		ret = of_property_read_u32(node, "reg", &id);
		if (ret || id >= smsm->num_entries) {
			dev_err(&pdev->dev, "invalid reg of entry\n");
			if (!ret)
				ret = -EINVAL;
			goto unwind_interfaces;
		}
		entry = &smsm->entries[id];

		entry->smsm = smsm;
		entry->remote_state = states + id;

		/* Setup subscription pointers and unsubscribe from any kicks */
		entry->subscription = intr_mask + id * smsm->num_hosts;
		writel(0, entry->subscription + smsm->local_host);

		ret = smsm_inbound_entry(smsm, entry, node);
		if (ret < 0)
			goto unwind_interfaces;
	}

	platform_set_drvdata(pdev, smsm);
	of_node_put(local_node);

	return 0;

unwind_interfaces:
	of_node_put(node);
	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);
out_put:
	of_node_put(local_node);
	return ret;
}

static int qcom_smsm_remove(struct platform_device *pdev)
{
	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
	unsigned id;

	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return 0;
}

static const struct of_device_id qcom_smsm_of_match[] = {
	{ .compatible = "qcom,smsm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);

static struct platform_driver qcom_smsm_driver = {
	.probe = qcom_smsm_probe,
	.remove = qcom_smsm_remove,
	.driver = {
		.name = "qcom-smsm",
		.of_match_table = qcom_smsm_of_match,
	},
};
module_platform_driver(qcom_smsm_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
MODULE_LICENSE("GPL v2");