xref: /openbmc/linux/drivers/gpu/drm/msm/adreno/a4xx_gpu.c (revision 3f7759e7)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
 */
#include "a4xx_gpu.h"

#define A4XX_INT0_MASK \
	(A4XX_INT0_RBBM_AHB_ERROR |        \
	 A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
	 A4XX_INT0_CP_T0_PACKET_IN_IB |    \
	 A4XX_INT0_CP_OPCODE_ERROR |       \
	 A4XX_INT0_CP_RESERVED_BIT_ERROR | \
	 A4XX_INT0_CP_HW_FAULT |           \
	 A4XX_INT0_CP_IB1_INT |            \
	 A4XX_INT0_CP_IB2_INT |            \
	 A4XX_INT0_CP_RB_INT |             \
	 A4XX_INT0_CP_REG_PROTECT_FAULT |  \
	 A4XX_INT0_CP_AHB_ERROR_HALT |     \
	 A4XX_INT0_CACHE_FLUSH_TS |        \
	 A4XX_INT0_UCHE_OOB_ACCESS)

extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
static bool a4xx_idle(struct msm_gpu *gpu);

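/*
 * Queue a submit on the ringbuffer: emit each user IB via
 * CP_INDIRECT_BUFFER_PFE, write the fence seqno to CP_SCRATCH_REG2, and
 * request a CACHE_FLUSH_TS event so the CACHE_FLUSH_TS IRQ fires once the
 * GPU has passed the timestamp.
 */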
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned int i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == submit->queue->ctx)
				break;
			fallthrough;
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	/* Flush HLSQ lazy updates to make sure there is nothing
	 * pending for indirect loads after the timestamp has
	 * passed:
	 */
	OUT_PKT3(ring, CP_EVENT_WRITE, 1);
	OUT_RING(ring, HLSQ_FLUSH);

	/* wait for idle before cache flush/interrupt */
	OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
	OUT_RING(ring, 0x00000000);

	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);

	adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
}

/*
 * a4xx_enable_hwcg() - Program the clock control registers
 * @gpu: The GPU to enable hardware clock gating on
 */
static void a4xx_enable_hwcg(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned int i;

	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
	for (i = 0; i < 4; i++)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);

	/* Disable L1 clocking in A420 due to CCU issues with it */
	for (i = 0; i < 4; i++) {
		if (adreno_is_a420(adreno_gpu)) {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
					0x00002020);
		} else {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
					0x00022020);
		}
	}

	/* No CCU for A405 */
	if (!adreno_is_a405(adreno_gpu)) {
		for (i = 0; i < 4; i++) {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
					0x00000922);
		}

		for (i = 0; i < 4; i++) {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
					0x00000000);
		}

		for (i = 0; i < 4; i++) {
			gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
					0x00000001);
		}
	}

	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
	/*
	 * Early A430s have a timing issue with SP/TP power collapse;
	 * disabling HW clock gating prevents it.
	 */
	if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
	else
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
	gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}

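/*
 * Bring up the CP micro engine: emit a CP_ME_INIT packet on ring 0 and wait
 * for the GPU to drain and go idle before the ring is used for real
 * submissions.
 */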
static bool a4xx_me_init(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = gpu->rb[0];

	OUT_PKT3(ring, CP_ME_INIT, 17);
	OUT_RING(ring, 0x000003f7);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000080);
	OUT_RING(ring, 0x00000100);
	OUT_RING(ring, 0x00000180);
	OUT_RING(ring, 0x00006600);
	OUT_RING(ring, 0x00000150);
	OUT_RING(ring, 0x0000014e);
	OUT_RING(ring, 0x00000154);
	OUT_RING(ring, 0x00000001);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);
	OUT_RING(ring, 0x00000000);

	adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
	return a4xx_idle(gpu);
}

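/*
 * One-time hardware setup after power-up: per-variant VBIF/QoS programming,
 * error reporting and hang detection, clock gating, CP protected register
 * ranges, firmware load and micro engine start.
 */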
static int a4xx_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
	uint32_t *ptr, len;
	int i, ret;

	if (adreno_is_a405(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else if (adreno_is_a420(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
		gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else if (adreno_is_a430(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
		gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
		gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
	} else {
		BUG();
	}

	/* Make all blocks contribute to the GPU BUSY perf counter */
	gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);

	/* Tune the hysteresis counters for SP and CP idle detection */
	gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
	gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);

	if (adreno_is_a430(adreno_gpu))
		gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);

	/* Enable the RBBM error reporting bits */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);

	/* Enable AHB error reporting */
	gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);

	/* Enable power counters */
	gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);

	/*
	 * Turn on hang detection - this spews a lot of useful information
	 * into the RBBM registers on a hang:
	 */
	gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
			(1 << 30) | 0xFFFF);

	gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
			(unsigned int)(a4xx_gpu->ocmem.base >> 14));

	/* Turn on performance counters: */
	gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);

	/* use the first CP counter for timestamp queries.. userspace may set
	 * this as well but it selects the same counter/countable:
	 */
	gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);

	if (adreno_is_a430(adreno_gpu))
		gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);

	/* Disable L2 bypass to avoid UCHE out of bounds errors */
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
	gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);

	gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
			(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));

	/* On A430 enable SP regfile sleep for power savings */
	/* TODO downstream does this for !420, so maybe applies for 405 too? */
	if (!adreno_is_a420(adreno_gpu)) {
		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
			0x00000441);
		gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
			0x00000441);
	}

	a4xx_enable_hwcg(gpu);

	/*
	 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
	 * due to timing issue with HLSQ_TP_CLK_EN
	 */
	if (adreno_is_a420(adreno_gpu)) {
		unsigned int val;

		val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
		val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
		val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
		gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
	}

	/* setup access protection: */
	gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);

	/* RBBM registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);

	/* CP registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
	gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);

	/* RB registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);

	/* HLSQ registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);

	/* VPC registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);

	/* SMMU registers */
	gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);

	gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);

	ret = adreno_hw_init(gpu);
	if (ret)
		return ret;

	/*
	 * Use the default ringbuffer size and block size but disable the RPTR
	 * shadow
	 */
	gpu_write(gpu, REG_A4XX_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);

	/* Set the ringbuffer address */
	gpu_write(gpu, REG_A4XX_CP_RB_BASE, lower_32_bits(gpu->rb[0]->iova));

	/* Load PM4: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
	len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
	DBG("loading PM4 ucode version: %u", ptr[0]);
	gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);

	/* Load PFP: */
	ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
	len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
	DBG("loading PFP ucode version: %u", ptr[0]);

	gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < len; i++)
		gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);

	/* clear ME_HALT to start micro engine */
	gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);

	return a4xx_me_init(gpu) ? 0 : -EINVAL;
}

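/*
 * Recover from a hang: log the CP scratch registers (and a full register
 * dump if hang_debug is set), pulse the RBBM software reset, then hand off
 * to the common adreno recovery path.
 */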
static void a4xx_recover(struct msm_gpu *gpu)
{
	int i;

	adreno_dump_info(gpu);

	for (i = 0; i < 8; i++) {
		printk("CP_SCRATCH_REG%d: %u\n", i,
			gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
	}

	/* dump registers before resetting gpu, if enabled: */
	if (hang_debug)
		a4xx_dump(gpu);

	gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
	gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
	gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
	adreno_recover(gpu);
}

static void a4xx_destroy(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);

	DBG("%s", gpu->name);

	adreno_gpu_cleanup(adreno_gpu);

	adreno_gpu_ocmem_cleanup(&a4xx_gpu->ocmem);

	kfree(a4xx_gpu);
}

static bool a4xx_idle(struct msm_gpu *gpu)
{
	/* wait for ringbuffer to drain: */
	if (!adreno_idle(gpu, gpu->rb[0]))
		return false;

	/* then wait for GPU to finish: */
	if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
					A4XX_RBBM_STATUS_GPU_BUSY))) {
		DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
		/* TODO maybe we need to reset GPU here to recover from hang? */
		return false;
	}

	return true;
}

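/*
 * IRQ handler: decode CP protected-mode faults for logging, ack the pending
 * interrupt status and let the core retire any completed submits.
 */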
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
{
	uint32_t status;

	status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
	DBG("%s: Int status %08x", gpu->name, status);

	if (status & A4XX_INT0_CP_REG_PROTECT_FAULT) {
		uint32_t reg = gpu_read(gpu, REG_A4XX_CP_PROTECT_STATUS);

		printk("CP | Protected mode error | %s | addr=%x\n",
			reg & (1 << 24) ? "WRITE" : "READ",
			(reg & 0xFFFFF) >> 2);
	}

	gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);

	msm_gpu_retire(gpu);

	return IRQ_HANDLED;
}

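/*
 * Register ranges captured for debugfs dumps and crash state: pairs of
 * inclusive start/end offsets, terminated by ~0.
 */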
static const unsigned int a4xx_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
	0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
	0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
	/* CP */
	0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
	0x0578, 0x058F,
	/* VSC */
	0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
	/* GRAS */
	0x0C80, 0x0C81, 0x0C88, 0x0C8F,
	/* RB */
	0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
	/* PC */
	0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
	/* VFD */
	0x0E40, 0x0E4A,
	/* VPC */
	0x0E60, 0x0E61, 0x0E63, 0x0E68,
	/* UCHE */
	0x0E80, 0x0E84, 0x0E88, 0x0E95,
	/* VMIDMT */
	0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
	0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
	0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
	0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
	0x1380, 0x1380,
	/* GRAS CTX 0 */
	0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
	/* PC CTX 0 */
	0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
	/* VFD CTX 0 */
	0x2200, 0x2204, 0x2208, 0x22A9,
	/* GRAS CTX 1 */
	0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
	/* PC CTX 1 */
	0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
	/* VFD CTX 1 */
	0x2600, 0x2604, 0x2608, 0x26A9,
	/* XPU */
	0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
	0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
	0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
	/* VBIF */
	0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
	0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
	0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
	0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
	0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
	0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
	0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
	0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
	0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
	0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
	0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
	0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
	0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
	0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
	0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
	0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
	0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
	0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
	0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
	0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
	0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
	0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
	0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
	0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
	0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
	0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
	0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
	0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
	0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
	0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
	0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
	0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
	0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
	0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
	~0 /* sentinel */
};

static const unsigned int a405_registers[] = {
	/* RBBM */
	0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
	0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
	0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
	/* CP */
	0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
	0x0578, 0x058F,
	/* VSC */
	0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
	/* GRAS */
	0x0C80, 0x0C81, 0x0C88, 0x0C8F,
	/* RB */
	0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
	/* PC */
	0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
	/* VFD */
	0x0E40, 0x0E4A,
	/* VPC */
	0x0E60, 0x0E61, 0x0E63, 0x0E68,
	/* UCHE */
	0x0E80, 0x0E84, 0x0E88, 0x0E95,
	/* GRAS CTX 0 */
	0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
	/* PC CTX 0 */
	0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
	/* VFD CTX 0 */
	0x2200, 0x2204, 0x2208, 0x22A9,
	/* GRAS CTX 1 */
	0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
	/* PC CTX 1 */
	0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
	/* VFD CTX 1 */
	0x2600, 0x2604, 0x2608, 0x26A9,
	/* VBIF version 0x20050000 */
	0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
	0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
	0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
	0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
	0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
	0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
	0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
	0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
	~0 /* sentinel */
};

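/*
 * Crash state capture: take the common adreno snapshot and record the
 * current RBBM status on top of it.
 */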
static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return ERR_PTR(-ENOMEM);

	adreno_gpu_state_get(gpu, state);

	state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);

	return state;
}

static void a4xx_dump(struct msm_gpu *gpu)
{
	printk("status:   %08x\n",
			gpu_read(gpu, REG_A4XX_RBBM_STATUS));
	adreno_dump(gpu);
}

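/*
 * Runtime PM hooks: on A430 the SP/TP power rail is toggled through
 * RBBM_POWER_CNTL_IP (SW_COLLAPSE) around the common suspend/resume
 * handling, and resume polls until the rail reports powered on.
 */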
static int a4xx_pm_resume(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	ret = msm_gpu_pm_resume(gpu);
	if (ret)
		return ret;

	if (adreno_is_a430(adreno_gpu)) {
		unsigned int reg;

		/* Set the default register values; set SW_COLLAPSE to 0 */
		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
		do {
			udelay(5);
			reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
		} while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
	}
	return 0;
}

static int a4xx_pm_suspend(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	ret = msm_gpu_pm_suspend(gpu);
	if (ret)
		return ret;

	if (adreno_is_a430(adreno_gpu)) {
		/* Set the default register values; set SW_COLLAPSE to 1 */
		gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
	}
	return 0;
}

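/* Read back the CP always-on counter selected in a4xx_hw_init() as the timestamp */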
static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
	*value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
		REG_A4XX_RBBM_PERFCTR_CP_0_HI);

	return 0;
}

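/*
 * The RPTR shadow is disabled in a4xx_hw_init(), so read the read pointer
 * back from the hardware register and cache it in the ring memptrs.
 */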
static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
	return ring->memptrs->rptr;
}

static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a4xx_hw_init,
		.pm_suspend = a4xx_pm_suspend,
		.pm_resume = a4xx_pm_resume,
		.recover = a4xx_recover,
		.submit = a4xx_submit,
		.active_ring = adreno_active_ring,
		.irq = a4xx_irq,
		.destroy = a4xx_destroy,
#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
		.show = adreno_show,
#endif
		.gpu_state_get = a4xx_gpu_state_get,
		.gpu_state_put = adreno_gpu_state_put,
		.create_address_space = adreno_iommu_create_address_space,
		.get_rptr = a4xx_get_rptr,
	},
	.get_timestamp = a4xx_get_timestamp,
};

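/*
 * Probe-time constructor: allocate the a4xx_gpu wrapper, run the common
 * adreno init, pick the per-variant register dump list, set up OCMEM for
 * GMEM and the interconnect paths, and bail out unless either an IOMMU or
 * the VRAM carveout is available.
 */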
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
{
	struct a4xx_gpu *a4xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct icc_path *ocmem_icc_path;
	struct icc_path *icc_path;
	int ret;

	if (!pdev) {
		DRM_DEV_ERROR(dev->dev, "no a4xx device\n");
		ret = -ENXIO;
		goto fail;
	}

	a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
	if (!a4xx_gpu) {
		ret = -ENOMEM;
		goto fail;
	}

	adreno_gpu = &a4xx_gpu->base;
	gpu = &adreno_gpu->base;

	gpu->perfcntrs = NULL;
	gpu->num_perfcntrs = 0;

	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
	if (ret)
		goto fail;

	adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
							     a4xx_registers;

	/* if needed, allocate gmem: */
	ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
				    &a4xx_gpu->ocmem);
	if (ret)
		goto fail;

	if (!gpu->aspace) {
		/* TODO we think it is possible to configure the GPU to
		 * restrict access to VRAM carveout.  But the required
		 * registers are unknown.  For now just bail out and
		 * limp along with just modesetting.  If it turns out
		 * to not be possible to restrict access, then we must
		 * implement a cmdstream validator.
		 */
		DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
		if (!allow_vram_carveout) {
			ret = -ENXIO;
			goto fail;
		}
	}

	icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
	if (IS_ERR(icc_path)) {
		ret = PTR_ERR(icc_path);
		goto fail;
	}

	ocmem_icc_path = devm_of_icc_get(&pdev->dev, "ocmem");
	if (IS_ERR(ocmem_icc_path)) {
		ret = PTR_ERR(ocmem_icc_path);
		/* allow -ENODATA, ocmem icc is optional */
		if (ret != -ENODATA)
			goto fail;
		ocmem_icc_path = NULL;
	}

	/*
	 * Set the ICC path to maximum speed for now by multiplying the fastest
	 * frequency by the bus width (8). We'll want to scale this later on to
	 * improve battery life.
	 */
	icc_set_bw(icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);
	icc_set_bw(ocmem_icc_path, 0, Bps_to_icc(gpu->fast_rate) * 8);

	return gpu;

fail:
	if (a4xx_gpu)
		a4xx_destroy(&a4xx_gpu->base.base);

	return ERR_PTR(ret);
}