xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gpu.c (revision fac59652993f075d57860769c99045b3ca18780d)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "drm/drm_drv.h"

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"
#include "msm_gpu_trace.h"
#include "adreno/adreno_gpu.h"

#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/devcoredump.h>
#include <linux/sched/task.h>

/*
 * Power Management:
 */

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			DRM_DEV_ERROR(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	if (gpu->core_clk && gpu->fast_rate)
		dev_pm_opp_set_rate(&gpu->pdev->dev, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2 MHz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}

static int disable_clk(struct msm_gpu *gpu)
{
	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non-zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway, so it all works out.
	 */
	if (gpu->core_clk)
		dev_pm_opp_set_rate(&gpu->pdev->dev, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	return clk_prepare_enable(gpu->ebi1_clk);
}

static int disable_axi(struct msm_gpu *gpu)
{
	clk_disable_unprepare(gpu->ebi1_clk);
	return 0;
}

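/*
 * Resume brings the GPU up in dependency order (power rails, then core
 * clocks, then the AXI bus clock); suspend below tears the same things
 * down in reverse order.
 */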
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_resume(0);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	msm_devfreq_resume(gpu);

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);
	trace_msm_gpu_suspend(0);

	msm_devfreq_suspend(gpu);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	gpu->suspend_count++;

	return 0;
}

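/*
 * Emits per-file GPU usage counters in the standard drm fdinfo
 * key:value format, e.g. (illustrative values):
 *
 *   drm-engine-gpu:	418936123 ns
 *   drm-cycles-gpu:	153837495
 *   drm-maxfreq-gpu:	800000000 Hz
 */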
void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,
			 struct drm_printer *p)
{
	drm_printf(p, "drm-engine-gpu:\t%llu ns\n", ctx->elapsed_ns);
	drm_printf(p, "drm-cycles-gpu:\t%llu\n", ctx->cycles);
	drm_printf(p, "drm-maxfreq-gpu:\t%u Hz\n", gpu->fast_rate);
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	if (!gpu->needs_hw_init)
		return 0;

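	/*
	 * Keep the IRQ masked across hw_init so the handler can't run
	 * against a GPU that is only partially (re)initialized.
	 */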
	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

#ifdef CONFIG_DEV_COREDUMP
static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
		size_t count, void *data, size_t datalen)
{
	struct msm_gpu *gpu = data;
	struct drm_print_iterator iter;
	struct drm_printer p;
	struct msm_gpu_state *state;

	state = msm_gpu_crashstate_get(gpu);
	if (!state)
		return 0;

	iter.data = buffer;
	iter.offset = 0;
	iter.start = offset;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_printf(&p, "---\n");
	drm_printf(&p, "kernel: " UTS_RELEASE "\n");
	drm_printf(&p, "module: " KBUILD_MODNAME "\n");
	drm_printf(&p, "time: %lld.%09ld\n",
		state->time.tv_sec, state->time.tv_nsec);
	if (state->comm)
		drm_printf(&p, "comm: %s\n", state->comm);
	if (state->cmd)
		drm_printf(&p, "cmdline: %s\n", state->cmd);

	gpu->funcs->show(gpu, state, &p);

	msm_gpu_crashstate_put(gpu);

	return count - iter.remain;
}
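
/*
 * The dump produced above is read from userspace via the devcoredump
 * class device, typically something like:
 *
 *   cat /sys/class/devcoredump/devcd<N>/data > crash.devcore
 *
 * (the exact node name depends on how many coredumps are registered)
 */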

static void msm_gpu_devcoredump_free(void *data)
{
	struct msm_gpu *gpu = data;

	msm_gpu_crashstate_put(gpu);
}

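/*
 * Record one bo from the offending submit into the crash state: name,
 * size and iova always; buffer contents only when 'full' is set (the
 * caller uses should_dump() to decide that).
 */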
static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
		struct drm_gem_object *obj, u64 iova, bool full)
{
	struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];

	state_bo->size = obj->size;
	state_bo->iova = iova;

	BUILD_BUG_ON(sizeof(state_bo->name) != sizeof(to_msm_bo(obj)->name));

	memcpy(state_bo->name, to_msm_bo(obj)->name, sizeof(state_bo->name));

	/* Don't record the contents of write-only objects: */
	if (full) {
		void *ptr;

		state_bo->data = kvmalloc(obj->size, GFP_KERNEL);
		if (!state_bo->data)
			goto out;

		msm_gem_lock(obj);
		ptr = msm_gem_get_vaddr_active(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(ptr)) {
			kvfree(state_bo->data);
			state_bo->data = NULL;
			goto out;
		}

		memcpy(state_bo->data, ptr, obj->size);
		msm_gem_put_vaddr(obj);
	}
out:
	state->nr_bos++;
}

static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
	struct msm_gpu_state *state;

	/* Check if the target supports capturing crash state */
	if (!gpu->funcs->gpu_state_get)
		return;

	/* Only save one crash state at a time */
	if (gpu->crashstate)
		return;

	state = gpu->funcs->gpu_state_get(gpu);
	if (IS_ERR_OR_NULL(state))
		return;

	/* Fill in the additional crash state information */
	state->comm = kstrdup(comm, GFP_KERNEL);
	state->cmd = kstrdup(cmd, GFP_KERNEL);
	state->fault_info = gpu->fault_info;

	if (submit) {
		int i;

		state->bos = kcalloc(submit->nr_bos,
			sizeof(struct msm_gpu_state_bo), GFP_KERNEL);

		for (i = 0; state->bos && i < submit->nr_bos; i++) {
			msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
						  submit->bos[i].iova,
						  should_dump(submit, i));
		}
	}

	/* Set the active crash state to be dumped on failure */
	gpu->crashstate = state;

	/* FIXME: Release the crashstate if this errors out? */
	dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
		msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
}
#else
static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
		struct msm_gem_submit *submit, char *comm, char *cmd)
{
}
#endif

/*
 * Hangcheck detection for locked gpu:
 */

static struct msm_gem_submit *
find_submit(struct msm_ringbuffer *ring, uint32_t fence)
{
	struct msm_gem_submit *submit;
	unsigned long flags;

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_for_each_entry(submit, &ring->submits, node) {
		if (submit->seqno == fence) {
			spin_unlock_irqrestore(&ring->submit_lock, flags);
			return submit;
		}
	}
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	return NULL;
}

static void retire_submits(struct msm_gpu *gpu);

static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd)
{
	struct msm_file_private *ctx = submit->queue->ctx;
	struct task_struct *task;

	WARN_ON(!mutex_is_locked(&submit->gpu->lock));

	/* Note that kstrdup will return NULL if argument is NULL: */
	*comm = kstrdup(ctx->comm, GFP_KERNEL);
	*cmd  = kstrdup(ctx->cmdline, GFP_KERNEL);

	task = get_pid_task(submit->pid, PIDTYPE_PID);
	if (!task)
		return;

	if (!*comm)
		*comm = kstrdup(task->comm, GFP_KERNEL);

	if (!*cmd)
		*cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);

	put_task_struct(task);
}

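/*
 * Recovery proceeds in a few steps: attribute the hang to a submit (and
 * bump its queue's fault count), capture a crash state for devcoredump,
 * advance the ring fences past the offending submit, ask the target to
 * reset via funcs->recover(), and finally replay any submits that were
 * queued behind the hung one.
 */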
static void recover_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;
	int i;

	mutex_lock(&gpu->lock);

	DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit) {
		/* Increment the fault counts */
		submit->queue->faults++;
		if (submit->aspace)
			submit->aspace->faults++;

		get_comm_cmdline(submit, &comm, &cmd);

		if (comm && cmd) {
			DRM_DEV_ERROR(dev->dev, "%s: offending task: %s (%s)\n",
				gpu->name, comm, cmd);

			msm_rd_dump_submit(priv->hangrd, submit,
				"offending task: %s (%s)", comm, cmd);
		} else {
			msm_rd_dump_submit(priv->hangrd, submit, NULL);
		}
	} else {
		/*
		 * We couldn't attribute this fault to any particular context,
		 * so increment the global fault count instead.
		 */
		gpu->global_faults++;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);

	kfree(cmd);
	kfree(comm);

	/*
	 * Update all the rings with the latest and greatest fence.. this
	 * needs to happen after msm_rd_dump_submit() to ensure that the
	 * bo's referenced by the offending submit are still around.
	 */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		uint32_t fence = ring->memptrs->fence;

		/*
		 * For the current (faulting?) ring/submit advance the fence by
		 * one more to clear the faulting submit
		 */
		if (ring == cur_ring)
			ring->memptrs->fence = ++fence;

		msm_update_fence(ring->fctx, fence);
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		gpu->funcs->recover(gpu);

		/*
		 * Replay all remaining submits starting with highest priority
		 * ring
		 */
		for (i = 0; i < gpu->nr_rings; i++) {
			struct msm_ringbuffer *ring = gpu->rb[i];
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			list_for_each_entry(submit, &ring->submits, node)
				gpu->funcs->submit(gpu, submit);
			spin_unlock_irqrestore(&ring->submit_lock, flags);
		}
	}

	pm_runtime_put(&gpu->pdev->dev);

	mutex_unlock(&gpu->lock);

	msm_gpu_retire(gpu);
}

static void fault_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
	struct msm_gem_submit *submit;
	struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
	char *comm = NULL, *cmd = NULL;

	mutex_lock(&gpu->lock);

	submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
	if (submit && submit->fault_dumped)
		goto resume_smmu;

	if (submit) {
		get_comm_cmdline(submit, &comm, &cmd);

		/*
		 * When we get GPU iova faults, we can get 1000s of them,
		 * but we really only want to log the first one.
		 */
		submit->fault_dumped = true;
	}

	/* Record the crash state */
	pm_runtime_get_sync(&gpu->pdev->dev);
	msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
	pm_runtime_put_sync(&gpu->pdev->dev);

	kfree(cmd);
	kfree(comm);

resume_smmu:
	memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
	gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);

	mutex_unlock(&gpu->lock);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;

	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + msecs_to_jiffies(priv->hangcheck_period)));
}

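/*
 * Targets that implement funcs->progress can report forward progress
 * even when the fence hasn't advanced; each positive report buys the
 * ring one more hangcheck period, up to
 * DRM_MSM_HANGCHECK_PROGRESS_RETRIES, before we declare a hang.
 */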
static bool made_progress(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	if (ring->hangcheck_progress_retries >= DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return false;

	if (!gpu->funcs->progress)
		return false;

	if (!gpu->funcs->progress(gpu, ring))
		return false;

	ring->hangcheck_progress_retries++;
	return true;
}

static void hangcheck_handler(struct timer_list *t)
{
	struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
	struct drm_device *dev = gpu->dev;
	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
	uint32_t fence = ring->memptrs->fence;

	if (fence != ring->hangcheck_fence) {
		/* some progress has been made.. ya! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
	} else if (fence_before(fence, ring->fctx->last_fence) &&
			!made_progress(gpu, ring)) {
		/* no progress and not done.. hung! */
		ring->hangcheck_fence = fence;
		ring->hangcheck_progress_retries = 0;
		DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
				gpu->name, ring->id);
		DRM_DEV_ERROR(dev->dev, "%s:     completed fence: %u\n",
				gpu->name, fence);
		DRM_DEV_ERROR(dev->dev, "%s:     submitted fence: %u\n",
				gpu->name, ring->fctx->last_fence);

		kthread_queue_work(gpu->worker, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (fence_after(ring->fctx->last_fence, ring->hangcheck_fence))
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	msm_gpu_retire(gpu);
}

/*
 * Performance Counters:
 */

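/*
 * update_hw_cntrs() returns deltas: each call samples the hw counter
 * registers and reports the change since the previous call (clamped to
 * at most num_perfcntrs values), stashing the raw readings for next
 * time.
 */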
/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

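/*
 * The always-on counter ticks at 19.2 MHz, so one tick is
 * 1e9 / 19.2e6 = 10000 / 192 ns (~52.08 ns); retire_submit() below
 * scales the tick delta by 10000/192 to get elapsed nanoseconds, then
 * derives an average clock frequency from the CP cycle count over that
 * interval.
 */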
static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
		struct msm_gem_submit *submit)
{
	int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
	volatile struct msm_gpu_submit_stats *stats;
	u64 elapsed, clock = 0, cycles;
	unsigned long flags;

	stats = &ring->memptrs->stats[index];
	/* Convert 19.2 MHz always-on ticks to nanoseconds for elapsed time */
	elapsed = (stats->alwayson_end - stats->alwayson_start) * 10000;
	do_div(elapsed, 192);

	cycles = stats->cpcycles_end - stats->cpcycles_start;

	/* Calculate the clock frequency from the number of CP cycles */
	if (elapsed) {
		clock = cycles * 1000;
		do_div(clock, elapsed);
	}

	submit->queue->ctx->elapsed_ns += elapsed;
	submit->queue->ctx->cycles     += cycles;

	trace_msm_gpu_submit_retired(submit, elapsed, clock,
		stats->alwayson_start, stats->alwayson_end);

	msm_submit_retire(submit);

	pm_runtime_mark_last_busy(&gpu->pdev->dev);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_del(&submit->node);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from active->idle: */
	mutex_lock(&gpu->active_lock);
	gpu->active_submits--;
	WARN_ON(gpu->active_submits < 0);
	if (!gpu->active_submits) {
		msm_devfreq_idle(gpu);
		pm_runtime_put_autosuspend(&gpu->pdev->dev);
	}
	mutex_unlock(&gpu->active_lock);

	msm_gem_submit_put(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	int i;

	/* Retire the commits starting with highest priority */
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		while (true) {
			struct msm_gem_submit *submit = NULL;
			unsigned long flags;

			spin_lock_irqsave(&ring->submit_lock, flags);
			submit = list_first_entry_or_null(&ring->submits,
					struct msm_gem_submit, node);
			spin_unlock_irqrestore(&ring->submit_lock, flags);

			/*
			 * If no submit, we are done.  If submit->fence hasn't
			 * been signalled, then later submits are not signalled
			 * either, so we are also done.
			 */
			if (submit && dma_fence_is_signaled(submit->hw_fence)) {
				retire_submit(gpu, ring, submit);
			} else {
				break;
			}
		}
	}

	wake_up_all(&gpu->retire_event);
}

static void retire_worker(struct kthread_work *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);

	retire_submits(gpu);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	int i;

	for (i = 0; i < gpu->nr_rings; i++)
		msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence);

	kthread_queue_work(gpu->worker, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_ringbuffer *ring = submit->ring;
	unsigned long flags;

	WARN_ON(!mutex_is_locked(&gpu->lock));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	submit->seqno = submit->hw_fence->seqno;

	update_sw_cntrs(gpu);

	/*
	 * ring->submits holds a ref to the submit, to deal with the case
	 * that a submit completes before msm_ioctl_gem_submit() returns.
	 */
	msm_gem_submit_get(submit);

	spin_lock_irqsave(&ring->submit_lock, flags);
	list_add_tail(&submit->node, &ring->submits);
	spin_unlock_irqrestore(&ring->submit_lock, flags);

	/* Update devfreq on transition from idle->active: */
	mutex_lock(&gpu->active_lock);
	if (!gpu->active_submits) {
		pm_runtime_get(&gpu->pdev->dev);
		msm_devfreq_active(gpu);
	}
	gpu->active_submits++;
	mutex_unlock(&gpu->active_lock);

	gpu->funcs->submit(gpu, submit);
	gpu->cur_ctx_seqno = submit->queue->ctx->seqno;

	pm_runtime_put(&gpu->pdev->dev);
	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;

	return gpu->funcs->irq(gpu);
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	int ret = devm_clk_bulk_get_all(&pdev->dev, &gpu->grp_clks);

	if (ret < 1) {
		gpu->nr_clocks = 0;
		return ret;
	}

	gpu->nr_clocks = ret;

	gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "core");

	gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
		gpu->nr_clocks, "rbbmtimer");

	return 0;
}

/* Return a new address space for a msm_file_private instance */
struct msm_gem_address_space *
msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
{
	struct msm_gem_address_space *aspace = NULL;

	if (!gpu)
		return NULL;

	/*
	 * If the target doesn't support private address spaces then return
	 * the global one
	 */
	if (gpu->funcs->create_private_address_space) {
		aspace = gpu->funcs->create_private_address_space(gpu);
		if (!IS_ERR(aspace))
			aspace->pid = get_pid(task_pid(task));
	}

	if (IS_ERR_OR_NULL(aspace))
		aspace = msm_gem_address_space_get(gpu->aspace);

	return aspace;
}
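
/*
 * Called per drm_file open; e.g. the open path does roughly (a sketch,
 * not verbatim):
 *
 *   ctx->aspace = msm_gpu_create_private_address_space(gpu, current);
 */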
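
/*
 * msm_gpu_init() wires up everything common to the targets: MMIO
 * mapping, IRQ, clocks and regulators, devfreq, the GPU address space,
 * the shared memptrs buffer, and one or more ringbuffers.
 */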
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	struct msm_drm_private *priv = drm->dev_private;
	int i, ret, nr_rings = config->nr_rings;
	void *memptrs;
	uint64_t memptrs_iova;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	gpu->worker = kthread_create_worker(0, "gpu-worker");
	if (IS_ERR(gpu->worker)) {
		ret = PTR_ERR(gpu->worker);
		gpu->worker = NULL;
		goto fail;
	}

	sched_set_fifo_low(gpu->worker->task);

	mutex_init(&gpu->active_lock);
	mutex_init(&gpu->lock);
	init_waitqueue_head(&gpu->retire_event);
	kthread_init_work(&gpu->retire_work, retire_worker);
	kthread_init_work(&gpu->recover_work, recover_worker);
	kthread_init_work(&gpu->fault_work, fault_worker);

	priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;

	/*
	 * If progress detection is supported, halve the hangcheck timer
	 * duration, as it takes two iterations of the hangcheck handler
	 * to detect a hang.
	 */
	if (funcs->progress)
		priv->hangcheck_period /= 2;

	timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);

	spin_lock_init(&gpu->perf_lock);

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq(pdev, 0);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, "gpu-irq", gpu);
	if (ret) {
		DRM_DEV_ERROR(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	platform_set_drvdata(pdev, &gpu->adreno_smmu);

	msm_devfreq_init(gpu);

	gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);

	if (gpu->aspace == NULL) {
		DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	} else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	memptrs = msm_gem_kernel_new(drm,
		sizeof(struct msm_rbmemptrs) * nr_rings,
		check_apriv(gpu, MSM_BO_WC), gpu->aspace, &gpu->memptrs_bo,
		&memptrs_iova);

	if (IS_ERR(memptrs)) {
		ret = PTR_ERR(memptrs);
		DRM_DEV_ERROR(drm->dev, "could not allocate memptrs: %d\n", ret);
		goto fail;
	}

	msm_gem_object_set_name(gpu->memptrs_bo, "memptrs");

	if (nr_rings > ARRAY_SIZE(gpu->rb)) {
		DRM_DEV_INFO_ONCE(drm->dev, "Only creating %zu ringbuffers\n",
			ARRAY_SIZE(gpu->rb));
		nr_rings = ARRAY_SIZE(gpu->rb);
	}

	/* Create ringbuffer(s): */
	for (i = 0; i < nr_rings; i++) {
		gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);

		if (IS_ERR(gpu->rb[i])) {
			ret = PTR_ERR(gpu->rb[i]);
			DRM_DEV_ERROR(drm->dev,
				"could not create ringbuffer %d: %d\n", i, ret);
			goto fail;
		}

		memptrs += sizeof(struct msm_rbmemptrs);
		memptrs_iova += sizeof(struct msm_rbmemptrs);
	}

	gpu->nr_rings = nr_rings;

	refcount_set(&gpu->sysprof_active, 1);

	return 0;

fail:
	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	int i;

	DBG("%s", gpu->name);

	for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
		msm_ringbuffer_destroy(gpu->rb[i]);
		gpu->rb[i] = NULL;
	}

	msm_gem_kernel_put(gpu->memptrs_bo, gpu->aspace);

	if (!IS_ERR_OR_NULL(gpu->aspace)) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu);
		msm_gem_address_space_put(gpu->aspace);
	}

	if (gpu->worker)
		kthread_destroy_worker(gpu->worker);

	msm_devfreq_cleanup(gpu);

	platform_set_drvdata(gpu->pdev, NULL);
}