xref: /openbmc/linux/drivers/gpu/drm/msm/msm_gpu.h (revision af5b4fff0fe80c8a43fa218e10c55ab8d2ff4dcb)
1caab277bSThomas Gleixner /* SPDX-License-Identifier: GPL-2.0-only */
27198e6b0SRob Clark /*
37198e6b0SRob Clark  * Copyright (C) 2013 Red Hat
47198e6b0SRob Clark  * Author: Rob Clark <robdclark@gmail.com>
57198e6b0SRob Clark  */
67198e6b0SRob Clark 
77198e6b0SRob Clark #ifndef __MSM_GPU_H__
87198e6b0SRob Clark #define __MSM_GPU_H__
97198e6b0SRob Clark 
109cba4056SRob Clark #include <linux/adreno-smmu-priv.h>
117198e6b0SRob Clark #include <linux/clk.h>
12fcf9d0b7SJordan Crouse #include <linux/interconnect.h>
131f60d114SSharat Masetty #include <linux/pm_opp.h>
147198e6b0SRob Clark #include <linux/regulator/consumer.h>
157198e6b0SRob Clark 
167198e6b0SRob Clark #include "msm_drv.h"
17ca762a8aSRob Clark #include "msm_fence.h"
187198e6b0SRob Clark #include "msm_ringbuffer.h"
19604234f3SJordan Crouse #include "msm_gem.h"
207198e6b0SRob Clark 
217198e6b0SRob Clark struct msm_gem_submit;
2270c70f09SRob Clark struct msm_gpu_perfcntr;
23e00e473dSJordan Crouse struct msm_gpu_state;
247198e6b0SRob Clark 
/* Per-target configuration handed to msm_gpu_init() by the probe code */
struct msm_gpu_config {
	const char *ioname;      /* presumably the MMIO resource name — see msm_gpu_init() */
	unsigned int nr_rings;   /* number of ringbuffers; presumably capped at MSM_GPU_MAX_RINGS — confirm */
};
295770fc7aSJordan Crouse 
307198e6b0SRob Clark /* So far, with hardware that I've seen to date, we can have:
317198e6b0SRob Clark  *  + zero, one, or two z180 2d cores
327198e6b0SRob Clark  *  + a3xx or a2xx 3d core, which share a common CP (the firmware
337198e6b0SRob Clark  *    for the CP seems to implement some different PM4 packet types
347198e6b0SRob Clark  *    but the basics of cmdstream submission are the same)
357198e6b0SRob Clark  *
367198e6b0SRob Clark  * Which means that the eventual complete "class" hierarchy, once
377198e6b0SRob Clark  * support for all past and present hw is in place, becomes:
387198e6b0SRob Clark  *  + msm_gpu
397198e6b0SRob Clark  *    + adreno_gpu
407198e6b0SRob Clark  *      + a3xx_gpu
417198e6b0SRob Clark  *      + a2xx_gpu
427198e6b0SRob Clark  *    + z180_gpu
437198e6b0SRob Clark  */
447198e6b0SRob Clark struct msm_gpu_funcs {
457198e6b0SRob Clark 	int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
467198e6b0SRob Clark 	int (*hw_init)(struct msm_gpu *gpu);
477198e6b0SRob Clark 	int (*pm_suspend)(struct msm_gpu *gpu);
487198e6b0SRob Clark 	int (*pm_resume)(struct msm_gpu *gpu);
4915eb9ad0SJordan Crouse 	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
50f97decacSJordan Crouse 	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
517198e6b0SRob Clark 	irqreturn_t (*irq)(struct msm_gpu *irq);
52f97decacSJordan Crouse 	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
53bd6f82d8SRob Clark 	void (*recover)(struct msm_gpu *gpu);
547198e6b0SRob Clark 	void (*destroy)(struct msm_gpu *gpu);
55c878a628SArnd Bergmann #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
567198e6b0SRob Clark 	/* show GPU status in debugfs: */
574f776f45SJordan Crouse 	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
58c0fec7f5SJordan Crouse 			struct drm_printer *p);
59331dc0bcSRob Clark 	/* for generation specific debugfs: */
607ce84471SWambui Karuga 	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
617198e6b0SRob Clark #endif
62de0a3d09SSharat Masetty 	unsigned long (*gpu_busy)(struct msm_gpu *gpu);
63e00e473dSJordan Crouse 	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
64c0fec7f5SJordan Crouse 	int (*gpu_state_put)(struct msm_gpu_state *state);
65de0a3d09SSharat Masetty 	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
661f60d114SSharat Masetty 	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
67ccac7ce3SJordan Crouse 	struct msm_gem_address_space *(*create_address_space)
68ccac7ce3SJordan Crouse 		(struct msm_gpu *gpu, struct platform_device *pdev);
69933415e2SJordan Crouse 	struct msm_gem_address_space *(*create_private_address_space)
70933415e2SJordan Crouse 		(struct msm_gpu *gpu);
718907afb4SJordan Crouse 	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
727198e6b0SRob Clark };
737198e6b0SRob Clark 
74e25e92e0SRob Clark /* Additional state for iommu faults: */
/* Additional state for iommu faults: */
struct msm_gpu_fault_info {
	u64 ttbr0;            /* TTBR0 (pagetable base) of the faulting context */
	unsigned long iova;   /* faulting GPU address */
	int flags;            /* iommu fault flags */
	const char *type;     /* fault type string — presumably for crash dump output; confirm */
	const char *block;    /* hw block that faulted — presumably; confirm against fault handler */
};
82e25e92e0SRob Clark 
/**
 * struct msm_gpu_devfreq - devfreq related state
 */
struct msm_gpu_devfreq {
	/** @devfreq: devfreq instance */
	struct devfreq *devfreq;

	/**
	 * @busy_cycles:
	 *
	 * Used by implementation of gpu->gpu_busy() to track the last
	 * busy counter value, for calculating elapsed busy cycles since
	 * last sampling period.
	 */
	u64 busy_cycles;

	/** @time: Time of last sampling period. */
	ktime_t time;
};
102*af5b4fffSRob Clark 
/* Base "class" for a GPU; embedded in the per-generation gpu struct */
struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;

	/* embedded smmu priv; the GPU device's drvdata points at this,
	 * see dev_to_gpu()
	 */
	struct adreno_smmu_priv adreno_smmu;

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;
	bool perfcntr_active;
	struct {
		bool active;
		ktime_t time;
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;
	uint32_t num_perfcntrs;

	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;

	/*
	 * List of GEM active objects on this gpu.  Protected by
	 * msm_drm_private::mm_lock
	 */
	struct list_head active_list;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* number of GPU hangs (for all contexts) */
	int global_faults;

	void __iomem *mmio;
	int irq;

	struct msm_gem_address_space *aspace;

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;

	/* Hang and Inactivity Detection:
	 */
#define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
	struct timer_list hangcheck_timer;

	/* Fault info for most recent iova fault: */
	struct msm_gpu_fault_info fault_info;

	/* work for handling GPU iova faults: */
	struct kthread_work fault_work;

	/* work for handling GPU recovery: */
	struct kthread_work recover_work;

	/* work for handling active-list retiring: */
	struct kthread_work retire_work;

	/* worker for retire/recover: */
	struct kthread_worker *worker;

	struct drm_gem_object *memptrs_bo;

	struct msm_gpu_devfreq devfreq;

	uint32_t suspend_count;

	/* most recent crash state, refcounted; see msm_gpu_crashstate_get() */
	struct msm_gpu_state *crashstate;
	/* True if the hardware supports expanded apriv (a650 and newer) */
	bool hw_apriv;

	struct thermal_cooling_device *cooling;
};
1847198e6b0SRob Clark 
18569a9313bSRob Clark static inline struct msm_gpu *dev_to_gpu(struct device *dev)
18669a9313bSRob Clark {
1879cba4056SRob Clark 	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
1889cba4056SRob Clark 	return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
18969a9313bSRob Clark }
19069a9313bSRob Clark 
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32

/* Default CP_RB_CNTL: buffer and block sizes encoded as log2 of the size
 * divided by 8 — presumably units of 64-bit dwords; confirm against the
 * AXXX_CP_RB_CNTL register definition.
 */
#define MSM_GPU_RB_CNTL_DEFAULT \
		(AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
		AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
198f97decacSJordan Crouse 
19937d77c3aSRob Clark static inline bool msm_gpu_active(struct msm_gpu *gpu)
20037d77c3aSRob Clark {
201f97decacSJordan Crouse 	int i;
202f97decacSJordan Crouse 
203f97decacSJordan Crouse 	for (i = 0; i < gpu->nr_rings; i++) {
204f97decacSJordan Crouse 		struct msm_ringbuffer *ring = gpu->rb[i];
205f97decacSJordan Crouse 
206f97decacSJordan Crouse 		if (ring->seqno > ring->memptrs->fence)
207f97decacSJordan Crouse 			return true;
208f97decacSJordan Crouse 	}
209f97decacSJordan Crouse 
210f97decacSJordan Crouse 	return false;
21137d77c3aSRob Clark }
21237d77c3aSRob Clark 
/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter..  but msm_gpu base class
 * will handle sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
	uint32_t select_reg;   /* register the child class programs to select the counter */
	uint32_t sample_reg;   /* register the base class samples the value from */
	uint32_t select_val;   /* value written to select_reg */
	const char *name;      /* name used when displaying the counter */
};
22570c70f09SRob Clark 
/*
 * A submitqueue; lifetime is refcounted via 'ref', dropped with
 * msm_submitqueue_put().
 */
struct msm_gpu_submitqueue {
	int id;                        /* queue id — presumably userspace-visible; confirm uapi */
	u32 flags;
	u32 prio;                      /* priority — presumably indexes a ring; confirm */
	int faults;                    /* hang count attributed to this queue */
	struct msm_file_private *ctx;  /* owning file-private context */
	struct list_head node;         /* list membership — presumably ctx's queue list; verify */
	struct kref ref;               /* released via msm_submitqueue_destroy() */
};
235f7de1545SJordan Crouse 
/* Snapshot of a single buffer object captured into a GPU crash state */
struct msm_gpu_state_bo {
	u64 iova;      /* GPU address of the buffer */
	size_t size;   /* size of the captured data */
	void *data;    /* copy of the buffer contents — may be NULL; confirm capture path */
	bool encoded;  /* data is encoded/compressed — presumably for coredump; confirm */
};
242cdb95931SJordan Crouse 
/* Captured GPU state (debugfs show / devcoredump), refcounted via 'ref' */
struct msm_gpu_state {
	struct kref ref;
	struct timespec64 time;    /* capture timestamp */

	/* per-ring state at capture time: */
	struct {
		u64 iova;          /* GPU address of ring contents */
		u32 fence;         /* last completed fence — see msm_gpu_active() */
		u32 seqno;         /* last submitted seqno */
		u32 rptr;
		u32 wptr;
		void *data;        /* copy of ring contents */
		int data_size;
		bool encoded;      /* data encoded/compressed — presumably for coredump; confirm */
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;
	u32 *registers;            /* register dump — presumably offset/value pairs; verify */

	u32 rbbm_status;

	char *comm;                /* comm of the offending process — presumably; confirm */
	char *cmd;                 /* cmdline of the offending process — presumably; confirm */

	struct msm_gpu_fault_info fault_info;

	int nr_bos;
	struct msm_gpu_state_bo *bos;  /* captured buffer objects, nr_bos entries */
};
271e00e473dSJordan Crouse 
2727198e6b0SRob Clark static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
2737198e6b0SRob Clark {
2747198e6b0SRob Clark 	msm_writel(data, gpu->mmio + (reg << 2));
2757198e6b0SRob Clark }
2767198e6b0SRob Clark 
2777198e6b0SRob Clark static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
2787198e6b0SRob Clark {
2797198e6b0SRob Clark 	return msm_readl(gpu->mmio + (reg << 2));
2807198e6b0SRob Clark }
2817198e6b0SRob Clark 
282ae53a829SJordan Crouse static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
283ae53a829SJordan Crouse {
28440a72b0cSSharat Masetty 	msm_rmw(gpu->mmio + (reg << 2), mask, or);
285ae53a829SJordan Crouse }
286ae53a829SJordan Crouse 
287ae53a829SJordan Crouse static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
288ae53a829SJordan Crouse {
289ae53a829SJordan Crouse 	u64 val;
290ae53a829SJordan Crouse 
291ae53a829SJordan Crouse 	/*
292ae53a829SJordan Crouse 	 * Why not a readq here? Two reasons: 1) many of the LO registers are
293ae53a829SJordan Crouse 	 * not quad word aligned and 2) the GPU hardware designers have a bit
294ae53a829SJordan Crouse 	 * of a history of putting registers where they fit, especially in
295ae53a829SJordan Crouse 	 * spins. The longer a GPU family goes the higher the chance that
296ae53a829SJordan Crouse 	 * we'll get burned.  We could do a series of validity checks if we
297ae53a829SJordan Crouse 	 * wanted to, but really is a readq() that much better? Nah.
298ae53a829SJordan Crouse 	 */
299ae53a829SJordan Crouse 
300ae53a829SJordan Crouse 	/*
301ae53a829SJordan Crouse 	 * For some lo/hi registers (like perfcounters), the hi value is latched
302ae53a829SJordan Crouse 	 * when the lo is read, so make sure to read the lo first to trigger
303ae53a829SJordan Crouse 	 * that
304ae53a829SJordan Crouse 	 */
305ae53a829SJordan Crouse 	val = (u64) msm_readl(gpu->mmio + (lo << 2));
306ae53a829SJordan Crouse 	val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
307ae53a829SJordan Crouse 
308ae53a829SJordan Crouse 	return val;
309ae53a829SJordan Crouse }
310ae53a829SJordan Crouse 
311ae53a829SJordan Crouse static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
312ae53a829SJordan Crouse {
313ae53a829SJordan Crouse 	/* Why not a writeq here? Read the screed above */
314ae53a829SJordan Crouse 	msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
315ae53a829SJordan Crouse 	msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
316ae53a829SJordan Crouse }
317ae53a829SJordan Crouse 
3187198e6b0SRob Clark int msm_gpu_pm_suspend(struct msm_gpu *gpu);
3197198e6b0SRob Clark int msm_gpu_pm_resume(struct msm_gpu *gpu);
320*af5b4fffSRob Clark 
321*af5b4fffSRob Clark void msm_devfreq_init(struct msm_gpu *gpu);
322*af5b4fffSRob Clark void msm_devfreq_cleanup(struct msm_gpu *gpu);
323*af5b4fffSRob Clark void msm_devfreq_resume(struct msm_gpu *gpu);
324*af5b4fffSRob Clark void msm_devfreq_suspend(struct msm_gpu *gpu);
3257198e6b0SRob Clark 
326eeb75474SRob Clark int msm_gpu_hw_init(struct msm_gpu *gpu);
327eeb75474SRob Clark 
32870c70f09SRob Clark void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
32970c70f09SRob Clark void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
33070c70f09SRob Clark int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
33170c70f09SRob Clark 		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
33270c70f09SRob Clark 
3337198e6b0SRob Clark void msm_gpu_retire(struct msm_gpu *gpu);
33415eb9ad0SJordan Crouse void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
3357198e6b0SRob Clark 
3367198e6b0SRob Clark int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
3377198e6b0SRob Clark 		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
3385770fc7aSJordan Crouse 		const char *name, struct msm_gpu_config *config);
3395770fc7aSJordan Crouse 
340933415e2SJordan Crouse struct msm_gem_address_space *
34125faf2f2SRob Clark msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
342933415e2SJordan Crouse 
3437198e6b0SRob Clark void msm_gpu_cleanup(struct msm_gpu *gpu);
3447198e6b0SRob Clark 
345e2550b7aSRob Clark struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
346bfd28b13SRob Clark void __init adreno_register(void);
347bfd28b13SRob Clark void __exit adreno_unregister(void);
3487198e6b0SRob Clark 
349f7de1545SJordan Crouse static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
350f7de1545SJordan Crouse {
351f7de1545SJordan Crouse 	if (queue)
352f7de1545SJordan Crouse 		kref_put(&queue->ref, msm_submitqueue_destroy);
353f7de1545SJordan Crouse }
354f7de1545SJordan Crouse 
355c0fec7f5SJordan Crouse static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
356c0fec7f5SJordan Crouse {
357c0fec7f5SJordan Crouse 	struct msm_gpu_state *state = NULL;
358c0fec7f5SJordan Crouse 
359c0fec7f5SJordan Crouse 	mutex_lock(&gpu->dev->struct_mutex);
360c0fec7f5SJordan Crouse 
361c0fec7f5SJordan Crouse 	if (gpu->crashstate) {
362c0fec7f5SJordan Crouse 		kref_get(&gpu->crashstate->ref);
363c0fec7f5SJordan Crouse 		state = gpu->crashstate;
364c0fec7f5SJordan Crouse 	}
365c0fec7f5SJordan Crouse 
366c0fec7f5SJordan Crouse 	mutex_unlock(&gpu->dev->struct_mutex);
367c0fec7f5SJordan Crouse 
368c0fec7f5SJordan Crouse 	return state;
369c0fec7f5SJordan Crouse }
370c0fec7f5SJordan Crouse 
371c0fec7f5SJordan Crouse static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
372c0fec7f5SJordan Crouse {
373c0fec7f5SJordan Crouse 	mutex_lock(&gpu->dev->struct_mutex);
374c0fec7f5SJordan Crouse 
375c0fec7f5SJordan Crouse 	if (gpu->crashstate) {
376c0fec7f5SJordan Crouse 		if (gpu->funcs->gpu_state_put(gpu->crashstate))
377c0fec7f5SJordan Crouse 			gpu->crashstate = NULL;
378c0fec7f5SJordan Crouse 	}
379c0fec7f5SJordan Crouse 
380c0fec7f5SJordan Crouse 	mutex_unlock(&gpu->dev->struct_mutex);
381c0fec7f5SJordan Crouse }
382c0fec7f5SJordan Crouse 
/*
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
 * support expanded privileges (see msm_gpu::hw_apriv); a no-op pass-through
 * of 'flags' otherwise.
 */
#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
389604234f3SJordan Crouse 
390604234f3SJordan Crouse 
3917198e6b0SRob Clark #endif /* __MSM_GPU_H__ */
392