/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_GPU_H__
#define __ETNAVIV_GPU_H__

#include "etnaviv_cmdbuf.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_drv.h"
#include "common.xml.h"

struct etnaviv_gem_submit;
struct etnaviv_vram_mapping;

18 struct etnaviv_chip_identity {
19 	u32 model;
20 	u32 revision;
21 	u32 product_id;
22 	u32 customer_id;
23 	u32 eco_id;
24 
25 	/* Supported feature fields. */
26 	u32 features;
27 
28 	/* Supported minor feature fields. */
29 	u32 minor_features0;
30 	u32 minor_features1;
31 	u32 minor_features2;
32 	u32 minor_features3;
33 	u32 minor_features4;
34 	u32 minor_features5;
35 	u32 minor_features6;
36 	u32 minor_features7;
37 	u32 minor_features8;
38 	u32 minor_features9;
39 	u32 minor_features10;
40 	u32 minor_features11;
41 
42 	/* Number of streams supported. */
43 	u32 stream_count;
44 
45 	/* Total number of temporary registers per thread. */
46 	u32 register_max;
47 
48 	/* Maximum number of threads. */
49 	u32 thread_count;
50 
51 	/* Number of shader cores. */
52 	u32 shader_core_count;
53 
54 	/* Number of Neural Network cores. */
55 	u32 nn_core_count;
56 
57 	/* Size of the vertex cache. */
58 	u32 vertex_cache_size;
59 
60 	/* Number of entries in the vertex output buffer. */
61 	u32 vertex_output_buffer_size;
62 
63 	/* Number of pixel pipes. */
64 	u32 pixel_pipes;
65 
66 	/* Number of instructions. */
67 	u32 instruction_count;
68 
69 	/* Number of constants. */
70 	u32 num_constants;
71 
72 	/* Buffer size */
73 	u32 buffer_size;
74 
75 	/* Number of varyings */
76 	u8 varyings_count;
77 };
78 
/*
 * Security mode of the GPU: none, kernel-managed, or TrustZone-managed
 * (presumably determines who programs the security related state —
 * confirm against the users in etnaviv_gpu.c).
 */
enum etnaviv_sec_mode {
	ETNA_SEC_NONE = 0,
	ETNA_SEC_KERNEL,
	ETNA_SEC_TZ
};

/*
 * Per-event bookkeeping for the hardware event slots
 * (one of ETNA_NR_EVENTS per GPU).
 */
struct etnaviv_event {
	struct dma_fence *fence;
	struct etnaviv_gem_submit *submit;

	/* optional callback invoked when this event fires as a sync point */
	void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
};

struct etnaviv_cmdbuf_suballoc;
struct regulator;
struct clk;

#define ETNA_NR_EVENTS 30

/* Lifecycle state of a GPU core, tracked in etnaviv_gpu::state. */
enum etnaviv_gpu_state {
	ETNA_GPU_STATE_UNKNOWN = 0,
	ETNA_GPU_STATE_IDENTIFIED,
	ETNA_GPU_STATE_RESET,
	ETNA_GPU_STATE_INITIALIZED,
	ETNA_GPU_STATE_RUNNING,
	ETNA_GPU_STATE_FAULT,
};

107 struct etnaviv_gpu {
108 	struct drm_device *drm;
109 	struct thermal_cooling_device *cooling;
110 	struct device *dev;
111 	struct mutex lock;
112 	struct etnaviv_chip_identity identity;
113 	enum etnaviv_sec_mode sec_mode;
114 	struct workqueue_struct *wq;
115 	struct mutex sched_lock;
116 	struct drm_gpu_scheduler sched;
117 	enum etnaviv_gpu_state state;
118 
119 	/* 'ring'-buffer: */
120 	struct etnaviv_cmdbuf buffer;
121 	int exec_state;
122 
123 	/* event management: */
124 	DECLARE_BITMAP(event_bitmap, ETNA_NR_EVENTS);
125 	struct etnaviv_event event[ETNA_NR_EVENTS];
126 	struct completion event_free;
127 	spinlock_t event_spinlock;
128 
129 	u32 idle_mask;
130 
131 	/* Fencing support */
132 	struct xarray user_fences;
133 	u32 next_user_fence;
134 	u32 next_fence;
135 	u32 completed_fence;
136 	wait_queue_head_t fence_event;
137 	u64 fence_context;
138 	spinlock_t fence_spinlock;
139 
140 	/* worker for handling 'sync' points: */
141 	struct work_struct sync_point_work;
142 	int sync_point_event;
143 
144 	/* hang detection */
145 	u32 hangcheck_dma_addr;
146 	u32 hangcheck_fence;
147 
148 	void __iomem *mmio;
149 	int irq;
150 
151 	struct etnaviv_iommu_context *mmu_context;
152 	unsigned int flush_seq;
153 
154 	/* Power Control: */
155 	struct clk *clk_bus;
156 	struct clk *clk_reg;
157 	struct clk *clk_core;
158 	struct clk *clk_shader;
159 
160 	unsigned int freq_scale;
161 	unsigned int fe_waitcycles;
162 	unsigned long base_rate_core;
163 	unsigned long base_rate_shader;
164 };
165 
/* Write a 32-bit value to the GPU MMIO register at byte offset @reg. */
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + reg);
}

/* Read a 32-bit value from the GPU MMIO register at byte offset @reg. */
static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + reg);
}

/*
 * Translate a power register offset for cores where the power register
 * block sits at a shifted location.
 */
static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
{
	/* Power registers in GC300 < 2.0 are offset by 0x100 */
	if (gpu->identity.model == chipModel_GC300 &&
	    gpu->identity.revision < 0x2000)
		reg += 0x100;

	return reg;
}

/* Write a power register, applying the GC300 < 2.0 address fixup. */
static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
{
	writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
}

/* Read a power register, applying the GC300 < 2.0 address fixup. */
static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
{
	return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
}

int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);

int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);

#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif

void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
	u32 fence, struct drm_etnaviv_timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
	struct etnaviv_gem_object *etnaviv_obj,
	struct drm_etnaviv_timespec *timeout);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch);

extern struct platform_driver etnaviv_gpu_driver;

#endif /* __ETNAVIV_GPU_H__ */