/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "intr.h"

/* Wait list management */

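/*
 * A waiter moves through these states:
 *   PENDING   - queued on a syncpt wait list, threshold not yet reached
 *   REMOVED   - taken off the wait list by the threshold handler, action
 *               not yet run
 *   CANCELLED - cancelled via host1x_intr_put_ref() before completion
 *   HANDLED   - action has run (or the cancelled waiter was reaped); only
 *               the final kref_put() remains
 * remove_completed_waiters() advances PENDING->REMOVED or
 * CANCELLED->HANDLED with a single atomic_inc_return().
 */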
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
				struct list_head *queue)
{
	struct host1x_waitlist *pos;
	u32 thresh = waiter->thresh;

	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
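		/* a signed difference handles 32-bit syncpt wrap-around */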
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else
			list_move_tail(&waiter->list, dest);
	}
}

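/*
 * Re-arm the syncpt interrupt for the lowest remaining threshold, i.e. the
 * waiter now at the head of the (sorted) wait list.
 */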
static void reset_threshold_interrupt(struct host1x *host,
				      struct list_head *head,
				      unsigned int id)
{
	u32 thresh =
		list_first_entry(head, struct host1x_waitlist, list)->thresh;

	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
	host1x_hw_intr_enable_syncpt_intr(host, id);
}

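/*
 * HOST1X_INTR_ACTION_SUBMIT_COMPLETE: the waiter's data is the channel whose
 * submit reached its threshold; let CDMA clean up the finished work.
 */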
static void action_submit_complete(struct host1x_waitlist *waiter)
{
	struct host1x_channel *channel = waiter->data;

	host1x_cdma_update(&channel->cdma);

	/* add nr_completed to trace */
	trace_host1x_channel_submit_complete(dev_name(channel->dev),
					     waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;
	wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;
	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

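/* dispatch table, indexed by enum host1x_intr_action */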
static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
};

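/*
 * Run the action for every waiter gathered by remove_completed_waiters();
 * each waiter is expected to arrive in the REMOVED state, leaves in the
 * HANDLED state and drops the wait-list reference.
 */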
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/*
 * Remove & handle all waiters that have completed for the given syncpt
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	run_handlers(completed);

	return empty;
}

/*
 * Sync point threshold interrupt handling, deferred to workqueue
 * (process) context
 */
static void syncpt_thresh_work(struct work_struct *work)
{
	struct host1x_syncpt_intr *syncpt_intr =
		container_of(work, struct host1x_syncpt_intr, work);
	struct host1x_syncpt *syncpt =
		container_of(syncpt_intr, struct host1x_syncpt, intr);
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));
}

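/*
 * Schedule an action to run when syncpt 'id' reaches 'thresh'.  The caller
 * provides the waiter, which is freed through its kref once handled.  If
 * 'ref' is non-NULL an extra reference is taken and returned so the wait can
 * later be cancelled with host1x_intr_put_ref().
 */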
int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
			   enum host1x_intr_action action, void *data,
			   struct host1x_waitlist *waiter, void **ref)
{
	struct host1x_syncpt *syncpt;
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	syncpt = host->syncpt + id;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, id);
	}

	spin_unlock(&syncpt->intr.lock);

	if (ref)
		*ref = waiter;
	return 0;
}

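/*
 * Cancel a waiter set up by host1x_intr_add_action() and drop the extra
 * reference that was returned through 'ref'.
 */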
void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

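	/*
	 * If the threshold handler has already taken the waiter off the wait
	 * list (REMOVED), yield until it finishes handling it; otherwise mark
	 * the still-pending waiter CANCELLED so it is only reaped, not run.
	 */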
	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
	       WLS_REMOVED)
		schedule();

	syncpt = host->syncpt + id;
	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));

	kref_put(&waiter->refcount, waiter_release);
}

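/*
 * Set up per-syncpt interrupt state (lock, wait list, IRQ name) and the
 * workqueue that runs the threshold work items.
 */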
int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;
	host->intr_wq = create_workqueue("host_syncpt");
	if (!host->intr_wq)
		return -ENOMEM;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02d", id);
	}

	host1x_intr_start(host);

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
	destroy_workqueue(host->intr_wq);
}

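/*
 * Program the syncpt interrupt hardware; the host1x clock rate is passed
 * down rounded up to MHz.
 */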
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
					    syncpt_thresh_work);
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

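/*
 * Disable all syncpt interrupts and reap every cancelled waiter.  Waiters
 * that are still pending cannot be freed here, so bail out with a warning
 * instead of freeing the syncpt IRQ.
 */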
void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}
355