/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "intr.h"

/* Wait list management */

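/*
 * Lifecycle of a waiter; the states are ordered so that a single
 * atomic_inc_return() advances either path:
 *
 *   completion:   PENDING -> REMOVED     (remove_completed_waiters())
 *                 REMOVED -> HANDLED     (run_handlers())
 *   cancellation: PENDING -> CANCELLED   (host1x_intr_put_ref())
 *                 CANCELLED -> HANDLED   (remove_completed_waiters(),
 *                                         freed without running the handler)
 */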
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
				struct list_head *queue)
{
	struct host1x_waitlist *pos;
	u32 thresh = waiter->thresh;

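	/*
	 * Walk the queue from the tail; the signed difference keeps the
	 * comparison correct across 32-bit syncpt value wraparound.
	 */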
	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

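	/*
	 * The queue is sorted by threshold, so stop at the first waiter
	 * whose threshold has not been reached yet; the signed difference
	 * again handles wraparound.
	 */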
	list_for_each_entry_safe(waiter, next, head, list) {
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/*
		 * Consolidate submit cleanups: consecutive completed
		 * SUBMIT_COMPLETE waiters for the same channel are folded
		 * into the previous waiter by bumping its count.
		 */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else {
			list_move_tail(&waiter->list, dest);
		}
	}
}

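/*
 * Re-arm the threshold interrupt for the lowest remaining threshold,
 * i.e. the waiter now at the head of the queue.
 */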
static void reset_threshold_interrupt(struct host1x *host,
				      struct list_head *head,
				      unsigned int id)
{
	u32 thresh =
		list_first_entry(head, struct host1x_waitlist, list)->thresh;

	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
	host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_submit_complete(struct host1x_waitlist *waiter)
{
	struct host1x_channel *channel = waiter->data;

	host1x_cdma_update(&channel->cdma);

	/* add the number of consolidated completions (waiter->count) to the trace */
	trace_host1x_channel_submit_complete(dev_name(channel->dev),
					     waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

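/* indexed by enum host1x_intr_action; the order must match the enum */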
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
};

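/*
 * Dispatch all gathered waiters to their action handlers. This runs after
 * the syncpt interrupt lock has been dropped, so handlers are free to take
 * their own locks.
 */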
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/*
 * Remove & handle all waiters that have completed for the given syncpt
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	run_handlers(completed);

	return empty;
}

/*
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in workqueue context
 */

static void syncpt_thresh_work(struct work_struct *work)
{
	struct host1x_syncpt_intr *syncpt_intr =
		container_of(work, struct host1x_syncpt_intr, work);
	struct host1x_syncpt *syncpt =
		container_of(syncpt_intr, struct host1x_syncpt, intr);
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));
}

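/*
 * Register an action to be taken once syncpt @id reaches @thresh. The
 * caller provides the pre-allocated waiter; if @ref is non-NULL, an extra
 * reference is taken and returned through it so that the action can later
 * be cancelled with host1x_intr_put_ref().
 */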
int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
			   enum host1x_intr_action action, void *data,
			   struct host1x_waitlist *waiter, void **ref)
{
	struct host1x_syncpt *syncpt;
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	syncpt = host->syncpt + id;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, id);
	}

	spin_unlock(&syncpt->intr.lock);

	if (ref)
		*ref = waiter;
	return 0;
}
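
/*
 * Usage sketch (illustrative only, not part of this file): a blocking wait
 * registers a wakeup action and sleeps on a wait queue, roughly the way the
 * syncpt wait path in syncpt.c does. The identifiers below come from the
 * wider host1x driver, not from this file:
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
 *	void *ref;
 *	int err;
 *
 *	err = host1x_intr_add_action(host, id, thresh,
 *				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *				     &wq, waiter, &ref);
 *	if (!err) {
 *		wait_event_interruptible(wq,
 *			host1x_syncpt_is_expired(host->syncpt + id, thresh));
 *		host1x_intr_put_ref(host, id, ref);
 *	}
 */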

void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

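	/*
	 * Try to mark the waiter as cancelled before it is handled. If it
	 * is currently REMOVED (taken off the queue but not yet run by
	 * run_handlers()), yield until the handler has finished with it.
	 */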
	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
	       WLS_REMOVED)
		schedule();

	syncpt = host->syncpt + id;
	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));

	kref_put(&waiter->refcount, waiter_release);
}

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02u", id);
	}

	host1x_intr_start(host);

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
}

void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
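	/* the hardware layer is handed the clock rate in cycles per usec */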
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
					    syncpt_thresh_work);
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

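		/*
		 * Release waiters that were cancelled but never handled;
		 * anything still left on the queue aborts the stop below.
		 */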
		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}
353