// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_drv.h"
#include "mdp_kms.h"

/* Bookkeeping for one blocking wait issued via mdp_irq_wait(). */
struct mdp_irq_wait {
	struct mdp_irq irq;
	int count;
};

/* Waiters in mdp_irq_wait() sleep here until wait_irq() wakes them (or the wait times out). */
static DECLARE_WAIT_QUEUE_HEAD(wait_event);

/* Protects mdp_kms->irq_list, ->vblank_mask, ->cur_irq_mask and ->in_irq. */
static DEFINE_SPINLOCK(list_lock);

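/*
 * Recompute the combined hardware interrupt mask (the vblank bits plus the
 * irqmask of every registered mdp_irq) and program it via set_irqmask().
 * Caller must hold list_lock.
 */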
static void update_irq(struct mdp_kms *mdp_kms)
{
	struct mdp_irq *irq;
	uint32_t irqmask = mdp_kms->vblank_mask;

	assert_spin_locked(&list_lock);

	list_for_each_entry(irq, &mdp_kms->irq_list, node)
		irqmask |= irq->irqmask;

	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
	mdp_kms->cur_irq_mask = irqmask;
}

/* If an mdp_irq's irqmask has changed, such as when the mdp5 crtc<->encoder
 * link changes, this must be called to recompute the new global irqmask.
 */
void mdp_irq_update(struct mdp_kms *mdp_kms)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

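/*
 * Dispatch the latched interrupt status bits: invoke the callback of every
 * registered mdp_irq whose irqmask overlaps 'status'.  list_lock is dropped
 * around each callback so that handlers may register or unregister irqs;
 * in_irq defers the resulting hardware mask update to the single
 * update_irq() call at the end.
 */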
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler, *n;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
		if (handler->irqmask & status) {
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp_kms->in_irq = false;
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

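/*
 * Set or clear the given vblank bits in the cached vblank mask and
 * reprogram the hardware interrupt mask accordingly.
 */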
void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	if (enable)
		mdp_kms->vblank_mask |= mask;
	else
		mdp_kms->vblank_mask &= ~mask;
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

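/* mdp_irq callback used by mdp_irq_wait(): mark completion and wake waiters. */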
static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp_irq_wait *wait =
			container_of(irq, struct mdp_irq_wait, irq);

	wait->count--;
	wake_up_all(&wait_event);
}

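/*
 * Block until any interrupt in 'irqmask' fires, using a temporary on-stack
 * mdp_irq, or give up after a 100ms timeout.
 */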
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
	struct mdp_irq_wait wait = {
		.irq = {
			.irq = wait_irq,
			.irqmask = irqmask,
		},
		.count = 1,
	};

	mdp_irq_register(mdp_kms, &wait.irq);
	wait_event_timeout(wait_event, (wait.count <= 0),
			msecs_to_jiffies(100));
	mdp_irq_unregister(mdp_kms, &wait.irq);
}

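/*
 * Add 'irq' to the list of interrupt consumers.  The consumer fills in
 * irq->irq (callback) and irq->irqmask beforehand, as mdp_irq_wait() does
 * with its on-stack mdp_irq.  If we are currently dispatching interrupts
 * (in_irq), the hardware mask update is deferred to the end of
 * mdp_dispatch_irqs(); otherwise it is applied immediately.
 */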
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (!irq->registered) {
		irq->registered = true;
		list_add(&irq->node, &mdp_kms->irq_list);
		needs_update = !mdp_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		mdp_irq_update(mdp_kms);
}

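/*
 * Remove 'irq' from the list of interrupt consumers, deferring the hardware
 * mask update in the same way as mdp_irq_register().
 */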
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (irq->registered) {
		irq->registered = false;
		list_del(&irq->node);
		needs_update = !mdp_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		mdp_irq_update(mdp_kms);
}
139