/* drivers/gpu/drm/msm/adreno/a3xx_gpu.c (revision b34081f1) */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "a3xx_gpu.h"

#define A3XX_INT0_MASK \
	(A3XX_INT0_RBBM_AHB_ERROR |        \
	 A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
	 A3XX_INT0_CP_T0_PACKET_IN_IB |    \
	 A3XX_INT0_CP_OPCODE_ERROR |       \
	 A3XX_INT0_CP_RESERVED_BIT_ERROR | \
	 A3XX_INT0_CP_HW_FAULT |           \
	 A3XX_INT0_CP_IB1_INT |            \
	 A3XX_INT0_CP_IB2_INT |            \
	 A3XX_INT0_CP_RB_INT |             \
	 A3XX_INT0_CP_REG_PROTECT_FAULT |  \
	 A3XX_INT0_CP_AHB_ERROR_HALT |     \
	 A3XX_INT0_UCHE_OOB_ACCESS)
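
/*
 * Of the sources unmasked above, the CP_RB_INT/CP_IB1_INT/CP_IB2_INT
 * bits signal command-processor progress through the ringbuffer and
 * indirect buffers; the remaining bits report error conditions (AHB/ATB
 * faults, malformed packets, register-protect violations, out-of-bounds
 * UCHE accesses) that we want surfaced for debugging.
 */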

static struct platform_device *a3xx_pdev;

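/*
 * Initialize the CP micro-engine via a CP_ME_INIT packet: a PM4 type-3
 * packet header followed by 17 payload dwords.  The payload values are
 * opaque magic; they appear to mirror what the downstream kgsl driver
 * programs (assumption, not documented here).
 */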
static void a3xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_PKT3(ring, CP_ME_INIT, 17);
	OUT_RING(ring, 0x000003f7);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000080);
	OUT_RING(ring, 0x00000100);
	OUT_RING(ring, 0x00000180);
	OUT_RING(ring, 0x00006600);
	OUT_RING(ring, 0x00000150);
	OUT_RING(ring, 0x0000014e);
	OUT_RING(ring, 0x00000154);
	OUT_RING(ring, 0x00000001);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	gpu->funcs->flush(gpu);
	gpu->funcs->idle(gpu);
}

static int a3xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t *ptr, len;
	int i, ret;

	DBG("%s", gpu->name);

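	/*
	 * VBIF (the bus interface) is tuned per GPU/SoC; the magic values
	 * below appear to be carried over from the downstream kgsl driver.
	 * For reference, gpu_write()/gpu_read() are plain MMIO accessors,
	 * roughly (sketch; see msm_gpu.h for the real helpers):
	 *
	 *   static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
	 *   {
	 *           msm_writel(data, gpu->mmio + (reg << 2));
	 *   }
	 *
	 * i.e. registers are named by dword offset and shifted to a byte
	 * offset on access.
	 */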
	if (adreno_is_a305(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);

	} else if (adreno_is_a320(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);

	} else if (adreno_is_a330(adreno_gpu)) {
		/* Set up 16 deep read/write request queues: */
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
		/* Enable WR-REQ: */
		gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
		/* Set up round robin arbitration between both AXI ports: */
		gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
		/* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
		gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
		/* Set up AOOO: */
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
		gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
		/* Enable 1K sort: */
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
		gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
		/* Disable VBIF clock gating.  This allows AXI to run at a
		 * higher frequency than the GPU:
		 */
		gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);

	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter: */
	gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection: */
	gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	/* Enable the RBBM error reporting bits.  This lets us get
	 * useful information on failure:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting: */
	gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Turn on the power counters: */
	gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);

	/* Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);

	/* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
	gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);

	/* Enable Clock gating: */
	gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);

	/* Set the OCMEM base address for A330 */
//TODO:
//	if (adreno_is_a330(adreno_gpu)) {
//		gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
//			(unsigned int)(a3xx_gpu->ocmem_base >> 14));
//	}

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);

	/* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS;
	 * we will use this to augment our hang detection:
	 */
	gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
			SP_FS_FULL_ALU_INSTRUCTIONS);

	gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	/* setup access protection: */
	gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);

	/* RBBM registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);

	/* CP registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
	gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);

	/* RB registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);

	/* VBIF registers */
	gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);

	/* NOTE: PM4/micro-engine firmware registers look to be the same
	 * for a2xx and a3xx.. we could possibly push that part down to
	 * adreno_gpu base class.  Or push both PM4 and PFP but
	 * parameterize the pfp ucode addr/data registers..
	 */

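	/*
	 * Both loops below start at index 1: the first dword of each
	 * firmware image is a version word (the value the DBGs print),
	 * not ucode.  The RAM/UCODE address registers are written once
	 * and advance automatically as each data dword is written.
	 */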
	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->pm4->data);
	len = adreno_gpu->pm4->size / 4;
	DBG("loading PM4 ucode version: %u", ptr[0]);

	gpu_write(gpu, REG_AXXX_CP_DEBUG,
			AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
			AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
	gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->pfp->data);
	len = adreno_gpu->pfp->size / 4;
	DBG("loading PFP ucode version: %u", ptr[0]);

	gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
	if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
		gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
				AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);

	a3xx_me_init(gpu);

	return 0;
}

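/*
 * Tear down in reverse order of a3xx_gpu_init() below: release the
 * shared adreno/msm_gpu state, then drop the platform-device reference
 * taken at init.
 */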
static void a3xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);
	put_device(&a3xx_gpu->pdev->dev);
	kfree(a3xx_gpu);
}

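/*
 * Idling is a two-step wait: adreno_idle() blocks until the CP has
 * consumed everything in the ringbuffer, then we busy-poll RBBM_STATUS
 * until the GPU_BUSY bit drops or ADRENO_IDLE_TIMEOUT jiffies elapse.
 */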
static void a3xx_idle(struct msm_gpu *gpu)
{
	unsigned long t;

	/* wait for ringbuffer to drain: */
	adreno_idle(gpu);

	t = jiffies + ADRENO_IDLE_TIMEOUT;

	/* then wait for GPU to finish: */
	do {
		uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
		if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
			return;
	} while (time_before(jiffies, t));

	DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);

	/* TODO maybe we need to reset GPU here to recover from hang? */
}

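/*
 * IRQ handler: ack whatever fired by writing the status back to
 * INT_CLEAR_CMD, then kick retire processing.  Per-source handling
 * (e.g. distinguishing errors from ring interrupts) is still a TODO
 * in this revision.
 */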
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
{
	uint32_t status;

	status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
	DBG("%s: %08x", gpu->name, status);

	// TODO

	gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

#ifdef CONFIG_DEBUG_FS
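/*
 * Register ranges dumped by a3xx_show(): consumed as consecutive
 * (start, end) pairs of dword offsets, both ends inclusive (note the
 * i += 2 walk and the addr <= end inner loop below).
 */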
static const unsigned int a3xx_registers[] = {
	0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
	0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
	0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
	0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
	0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
	0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
	0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
	0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
	0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
	0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
	0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
	0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
	0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
	0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
	0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
	0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
	0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
	0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
	0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
	0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
	0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
	0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
	0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
	0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
	0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
	0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
	0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
	0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
	0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
	0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
	0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
	0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
	0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
	0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
	0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
	0x303c, 0x303c, 0x305e, 0x305f,
};

static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	int i;

	adreno_show(gpu, m);
	seq_printf(m, "status:   %08x\n",
			gpu_read(gpu, REG_A3XX_RBBM_STATUS));

	/* dump these out in a form that can be parsed by demsm: */
	seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
		uint32_t start = a3xx_registers[i];
		uint32_t end   = a3xx_registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			seq_printf(m, "IO:R %08x %08x\n", addr << 2, val);
		}
	}
}
#endif

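/*
 * Hook table: a3xx provides its own hw_init/idle/irq/destroy (plus
 * show under debugfs); the rest is shared adreno_gpu/msm_gpu code.
 */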
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a3xx_hw_init,
		.pm_suspend = msm_gpu_pm_suspend,
		.pm_resume = msm_gpu_pm_resume,
		.recover = adreno_recover,
		.last_fence = adreno_last_fence,
		.submit = adreno_submit,
		.flush = adreno_flush,
		.idle = a3xx_idle,
		.irq = a3xx_irq,
		.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a3xx_show,
#endif
	},
};

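/*
 * Constructor, called by the core msm driver.  The platform device is
 * the one cached by a3xx_probe() below; clock rates and the bus
 * frequency index come in through its platform data.
 */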
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
{
	struct a3xx_gpu *a3xx_gpu = NULL;
	struct msm_gpu *gpu;
	struct platform_device *pdev = a3xx_pdev;
	struct adreno_platform_config *config;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "no a3xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	config = pdev->dev.platform_data;

	a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
	if (!a3xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	gpu = &a3xx_gpu->base.base;

	get_device(&pdev->dev);
	a3xx_gpu->pdev = pdev;

	gpu->fast_rate = config->fast_rate;
	gpu->slow_rate = config->slow_rate;
	gpu->bus_freq  = config->bus_freq;

	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);

	ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
			&funcs, config->rev);
	if (ret)
		goto fail;

	return &a3xx_gpu->base.base;

fail:
	if (a3xx_gpu)
		a3xx_destroy(&a3xx_gpu->base.base);

	return ERR_PTR(ret);
}

/*
 * The a3xx device:
 */

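/*
 * Legacy (non-DT) probe: derive clock rates and the exact GPU revision
 * from SoC/socinfo data.  fast_rate/slow_rate are in Hz; bus_freq
 * appears to be an index into a bus-scaling table rather than a
 * frequency (assumption based on its small values).
 */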
static int a3xx_probe(struct platform_device *pdev)
{
	static struct adreno_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
#else
	uint32_t version = socinfo_get_version();
	if (cpu_is_apq8064ab()) {
		config.fast_rate = 450000000;
		config.slow_rate = 27000000;
		config.bus_freq  = 4;
		config.rev = ADRENO_REV(3, 2, 1, 0);
	} else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
		config.fast_rate = 400000000;
		config.slow_rate = 27000000;
		config.bus_freq  = 4;

		if (SOCINFO_VERSION_MAJOR(version) == 2)
			config.rev = ADRENO_REV(3, 2, 0, 2);
		else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
				(SOCINFO_VERSION_MINOR(version) == 1))
			config.rev = ADRENO_REV(3, 2, 0, 1);
		else
			config.rev = ADRENO_REV(3, 2, 0, 0);

	} else if (cpu_is_msm8930()) {
		config.fast_rate = 400000000;
		config.slow_rate = 27000000;
		config.bus_freq  = 3;

		if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
			(SOCINFO_VERSION_MINOR(version) == 2))
			config.rev = ADRENO_REV(3, 0, 5, 2);
		else
			config.rev = ADRENO_REV(3, 0, 5, 0);

	}
#endif
	pdev->dev.platform_data = &config;
	a3xx_pdev = pdev;
	return 0;
}

static int a3xx_remove(struct platform_device *pdev)
{
	a3xx_pdev = NULL;
	return 0;
}

static struct platform_driver a3xx_driver = {
	.probe = a3xx_probe,
	.remove = a3xx_remove,
	.driver.name = "kgsl-3d0",
};

void __init a3xx_register(void)
{
	platform_driver_register(&a3xx_driver);
}

void __exit a3xx_unregister(void)
{
	platform_driver_unregister(&a3xx_driver);
}