xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 44a99b65)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23e60f8db5SAlex Xie #include <linux/firmware.h>
24fd5fd480SChunming Zhou #include <drm/drm_cache.h>
25e60f8db5SAlex Xie #include "amdgpu.h"
26e60f8db5SAlex Xie #include "gmc_v9_0.h"
278d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
28e60f8db5SAlex Xie 
2975199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3075199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
31cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
32135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
33135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
34fb960bd2SFeifei Xu #include "vega10_enum.h"
3565417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
366ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
37250b4228SChristian König #include "oss/osssys_4_0_offset.h"
38e60f8db5SAlex Xie 
39946a4d5bSShaoyun Liu #include "soc15.h"
40e60f8db5SAlex Xie #include "soc15_common.h"
4190c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
42e60f8db5SAlex Xie 
43e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
44e60f8db5SAlex Xie #include "mmhub_v1_0.h"
45e60f8db5SAlex Xie 
4644a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
4744a99b65SAndrey Grodzovsky 
48ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
49ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
50ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
51ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
52ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
53ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
54ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
55ebdef28eSAlex Deucher 
56e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
57e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
58e60f8db5SAlex Xie 
/*
 * Vega10 HDP golden register settings, stored as consecutive
 * (register offset, AND mask, OR value) triples -- layout assumed from the
 * usual amdgpu golden-register convention; the consumer is not visible in
 * this file section, so confirm against the init code that applies these.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
72e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden register overrides (macro args: IP, instance, register,
 * then two 32-bit values -- presumably mask and value; see soc15.h). */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
785c583018SEvan Quan 
/* ATHUB 1.0.0 golden register overrides (same layout as the MMHUB table). */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
845c583018SEvan Quan 
8502bab923SDavid Panariti /* Ecc related register addresses, (BASE + reg offset) */
8602bab923SDavid Panariti /* Universal Memory Controller caps (may be fused). */
8702bab923SDavid Panariti /* UMCCH:UmcLocalCap */
8802bab923SDavid Panariti #define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
8902bab923SDavid Panariti #define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
9002bab923SDavid Panariti #define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
9102bab923SDavid Panariti #define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
9202bab923SDavid Panariti #define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
9302bab923SDavid Panariti #define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
9402bab923SDavid Panariti #define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
9502bab923SDavid Panariti #define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
9602bab923SDavid Panariti #define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
9702bab923SDavid Panariti #define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
9802bab923SDavid Panariti #define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
9902bab923SDavid Panariti #define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
10002bab923SDavid Panariti #define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
10102bab923SDavid Panariti #define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
10202bab923SDavid Panariti #define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
10302bab923SDavid Panariti #define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)
10402bab923SDavid Panariti 
10502bab923SDavid Panariti /* Universal Memory Controller Channel config. */
10602bab923SDavid Panariti /* UMCCH:UMC_CONFIG */
10702bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
10802bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
10902bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
11002bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
11102bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
11202bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
11302bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
11402bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
11502bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
11602bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
11702bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
11802bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
11902bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
12002bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
12102bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
12202bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)
12302bab923SDavid Panariti 
12402bab923SDavid Panariti /* Universal Memory Controller Channel Ecc config. */
12502bab923SDavid Panariti /* UMCCH:EccCtrl */
12602bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
12702bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
12802bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
12902bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
13002bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
13102bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
13202bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
13302bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
13402bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
13502bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
13602bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
13702bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
13802bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
13902bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
14002bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
14102bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
14202bab923SDavid Panariti 
/* UMCCH:UmcLocalCap register address for each of the 16 UMC channels,
 * built from the UMCLOCALCAPS_ADDR* defines above (4 base addresses x
 * 4 per-channel strides of 0x800). */
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};
16102bab923SDavid Panariti 
/* UMCCH:UMC_CONFIG register address for each of the 16 UMC channels
 * (same base + 0x800-stride scheme as the UmcLocalCap table). */
static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};
18002bab923SDavid Panariti 
/* UMCCH:EccCtrl register address for each of the 16 UMC channels
 * (same base + 0x800-stride scheme as the tables above). */
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
19902bab923SDavid Panariti 
200e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
201e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
202e60f8db5SAlex Xie 					unsigned type,
203e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
204e60f8db5SAlex Xie {
205e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
206ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
207e60f8db5SAlex Xie 
20811250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
20911250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21011250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21111250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21211250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21311250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21411250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
21511250164SChristian König 
216e60f8db5SAlex Xie 	switch (state) {
217e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
218ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
219ae6d1416STom St Denis 			hub = &adev->vmhub[j];
220e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
221e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
222e60f8db5SAlex Xie 				tmp = RREG32(reg);
223e60f8db5SAlex Xie 				tmp &= ~bits;
224e60f8db5SAlex Xie 				WREG32(reg, tmp);
225e60f8db5SAlex Xie 			}
226e60f8db5SAlex Xie 		}
227e60f8db5SAlex Xie 		break;
228e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
229ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
230ae6d1416STom St Denis 			hub = &adev->vmhub[j];
231e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
232e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
233e60f8db5SAlex Xie 				tmp = RREG32(reg);
234e60f8db5SAlex Xie 				tmp |= bits;
235e60f8db5SAlex Xie 				WREG32(reg, tmp);
236e60f8db5SAlex Xie 			}
237e60f8db5SAlex Xie 		}
238e60f8db5SAlex Xie 	default:
239e60f8db5SAlex Xie 		break;
240e60f8db5SAlex Xie 	}
241e60f8db5SAlex Xie 
242e60f8db5SAlex Xie 	return 0;
243e60f8db5SAlex Xie }
244e60f8db5SAlex Xie 
245e60f8db5SAlex Xie static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
246e60f8db5SAlex Xie 				struct amdgpu_irq_src *source,
247e60f8db5SAlex Xie 				struct amdgpu_iv_entry *entry)
248e60f8db5SAlex Xie {
249c4f46f22SChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
2504d6cbde3SFelix Kuehling 	uint32_t status = 0;
251e60f8db5SAlex Xie 	u64 addr;
252e60f8db5SAlex Xie 
253e60f8db5SAlex Xie 	addr = (u64)entry->src_data[0] << 12;
254e60f8db5SAlex Xie 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
255e60f8db5SAlex Xie 
25679a0c465SMonk Liu 	if (!amdgpu_sriov_vf(adev)) {
2575a9b8e8aSChristian König 		status = RREG32(hub->vm_l2_pro_fault_status);
2585a9b8e8aSChristian König 		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
2594d6cbde3SFelix Kuehling 	}
260e60f8db5SAlex Xie 
2614d6cbde3SFelix Kuehling 	if (printk_ratelimit()) {
262efaa9646SAndrey Grodzovsky 		struct amdgpu_task_info task_info = { 0 };
263efaa9646SAndrey Grodzovsky 
264efaa9646SAndrey Grodzovsky 		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
265efaa9646SAndrey Grodzovsky 
2664d6cbde3SFelix Kuehling 		dev_err(adev->dev,
267efaa9646SAndrey Grodzovsky 			"[%s] VMC page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d\n)\n",
268c4f46f22SChristian König 			entry->vmid_src ? "mmhub" : "gfxhub",
269c4f46f22SChristian König 			entry->src_id, entry->ring_id, entry->vmid,
270efaa9646SAndrey Grodzovsky 			entry->pasid, task_info.process_name, task_info.tgid,
271efaa9646SAndrey Grodzovsky 			task_info.task_name, task_info.pid);
2724d6cbde3SFelix Kuehling 		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
27379a0c465SMonk Liu 			addr, entry->client_id);
2744d6cbde3SFelix Kuehling 		if (!amdgpu_sriov_vf(adev))
2754d6cbde3SFelix Kuehling 			dev_err(adev->dev,
2764d6cbde3SFelix Kuehling 				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
2774d6cbde3SFelix Kuehling 				status);
27879a0c465SMonk Liu 	}
279e60f8db5SAlex Xie 
280e60f8db5SAlex Xie 	return 0;
281e60f8db5SAlex Xie }
282e60f8db5SAlex Xie 
/* IRQ source callbacks for VM faults: .set toggles fault-interrupt
 * delivery, .process reports the fault. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
287e60f8db5SAlex Xie 
288e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
289e60f8db5SAlex Xie {
290770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
291770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
292e60f8db5SAlex Xie }
293e60f8db5SAlex Xie 
294c4f46f22SChristian König static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid)
29503f89febSChristian König {
29603f89febSChristian König 	u32 req = 0;
29703f89febSChristian König 
298c4f46f22SChristian König 	/* invalidate using legacy mode on vmid*/
29903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
300c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
30103f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
30203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
30303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
30403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
30503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
30603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
30703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
30803f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
30903f89febSChristian König 
31003f89febSChristian König 	return req;
31103f89febSChristian König }
31203f89febSChristian König 
313e60f8db5SAlex Xie /*
314e60f8db5SAlex Xie  * GART
315e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
316e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
317e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
318e60f8db5SAlex Xie  */
319e60f8db5SAlex Xie 
/**
 * gmc_v9_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table via MMIO on every VM hub,
 * then busy-wait (and, failing that, udelay-poll) for each hub's ack.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* Serialize concurrent invalidation requests on engine 17. */
	spin_lock(&adev->gmc.invalidate_lock);

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		/* Kick off the invalidation request on this hub. */
		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK.*/
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;	/* ack bit for our vmid */
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;	/* acked quickly; next hub */

		/* Wait for ACK with a delay.*/
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;	/* acked within timeout; next hub */

		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->gmc.invalidate_lock);
}
370e60f8db5SAlex Xie 
/**
 * gmc_v9_0_emit_flush_gpu_tlb - emit a TLB flush on a ring
 *
 * @ring: ring to emit the flush on
 * @vmid: vm instance to flush
 * @pd_addr: page-directory base address to program for @vmid
 *
 * Emits ring commands that program the context page-table base for
 * @vmid on the ring's VM hub, then emits an invalidate request and a
 * wait for its ack. Returns the PDE-encoded @pd_addr that was written.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned vmid, uint64_t pd_addr)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid);
	uint64_t flags = AMDGPU_PTE_VALID;
	unsigned eng = ring->vm_inv_eng;	/* per-ring invalidate engine */

	/* Encode pd_addr as a root PDE (level -1) and mark it valid. */
	amdgpu_gmc_get_vm_pde(adev, -1, &pd_addr, &flags);
	pd_addr |= flags;

	/* Context PTB address registers are 64-bit: lo then hi dword. */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	/* Write the invalidate request and wait for the vmid's ack bit. */
	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	return pd_addr;
}
3959096d6e5SChristian König 
396c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
397c633c00bSChristian König 					unsigned pasid)
398c633c00bSChristian König {
399c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
400c633c00bSChristian König 	uint32_t reg;
401c633c00bSChristian König 
402c633c00bSChristian König 	if (ring->funcs->vmhub == AMDGPU_GFXHUB)
403c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
404c633c00bSChristian König 	else
405c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
406c633c00bSChristian König 
407c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
408c633c00bSChristian König }
409c633c00bSChristian König 
410e60f8db5SAlex Xie /**
411132f34e4SChristian König  * gmc_v9_0_set_pte_pde - update the page tables using MMIO
412e60f8db5SAlex Xie  *
413e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
414e60f8db5SAlex Xie  * @cpu_pt_addr: cpu address of the page table
415e60f8db5SAlex Xie  * @gpu_page_idx: entry in the page table to update
416e60f8db5SAlex Xie  * @addr: dst addr to write into pte/pde
417e60f8db5SAlex Xie  * @flags: access flags
418e60f8db5SAlex Xie  *
419e60f8db5SAlex Xie  * Update the page tables using the CPU.
420e60f8db5SAlex Xie  */
421132f34e4SChristian König static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
422132f34e4SChristian König 				uint32_t gpu_page_idx, uint64_t addr,
423e60f8db5SAlex Xie 				uint64_t flags)
424e60f8db5SAlex Xie {
425e60f8db5SAlex Xie 	void __iomem *ptr = (void *)cpu_pt_addr;
426e60f8db5SAlex Xie 	uint64_t value;
427e60f8db5SAlex Xie 
428e60f8db5SAlex Xie 	/*
429e60f8db5SAlex Xie 	 * PTE format on VEGA 10:
430e60f8db5SAlex Xie 	 * 63:59 reserved
431e60f8db5SAlex Xie 	 * 58:57 mtype
432e60f8db5SAlex Xie 	 * 56 F
433e60f8db5SAlex Xie 	 * 55 L
434e60f8db5SAlex Xie 	 * 54 P
435e60f8db5SAlex Xie 	 * 53 SW
436e60f8db5SAlex Xie 	 * 52 T
437e60f8db5SAlex Xie 	 * 50:48 reserved
438e60f8db5SAlex Xie 	 * 47:12 4k physical page base address
439e60f8db5SAlex Xie 	 * 11:7 fragment
440e60f8db5SAlex Xie 	 * 6 write
441e60f8db5SAlex Xie 	 * 5 read
442e60f8db5SAlex Xie 	 * 4 exe
443e60f8db5SAlex Xie 	 * 3 Z
444e60f8db5SAlex Xie 	 * 2 snooped
445e60f8db5SAlex Xie 	 * 1 system
446e60f8db5SAlex Xie 	 * 0 valid
447e60f8db5SAlex Xie 	 *
448e60f8db5SAlex Xie 	 * PDE format on VEGA 10:
449e60f8db5SAlex Xie 	 * 63:59 block fragment size
450e60f8db5SAlex Xie 	 * 58:55 reserved
451e60f8db5SAlex Xie 	 * 54 P
452e60f8db5SAlex Xie 	 * 53:48 reserved
453e60f8db5SAlex Xie 	 * 47:6 physical base address of PD or PTE
454e60f8db5SAlex Xie 	 * 5:3 reserved
455e60f8db5SAlex Xie 	 * 2 C
456e60f8db5SAlex Xie 	 * 1 system
457e60f8db5SAlex Xie 	 * 0 valid
458e60f8db5SAlex Xie 	 */
459e60f8db5SAlex Xie 
460e60f8db5SAlex Xie 	/*
461e60f8db5SAlex Xie 	 * The following is for PTE only. GART does not have PDEs.
462e60f8db5SAlex Xie 	*/
463e60f8db5SAlex Xie 	value = addr & 0x0000FFFFFFFFF000ULL;
464e60f8db5SAlex Xie 	value |= flags;
465e60f8db5SAlex Xie 	writeq(value, ptr + (gpu_page_idx * 8));
466e60f8db5SAlex Xie 	return 0;
467e60f8db5SAlex Xie }
468e60f8db5SAlex Xie 
469e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
470e60f8db5SAlex Xie 						uint32_t flags)
471e60f8db5SAlex Xie 
472e60f8db5SAlex Xie {
473e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
474e60f8db5SAlex Xie 
475e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
476e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
477e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
478e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
479e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
480e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
481e60f8db5SAlex Xie 
482e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
483e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
484e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
485e60f8db5SAlex Xie 		break;
486e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
487e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
488e60f8db5SAlex Xie 		break;
489e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
490e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
491e60f8db5SAlex Xie 		break;
492e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
493e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
494e60f8db5SAlex Xie 		break;
495e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
496e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
497e60f8db5SAlex Xie 		break;
498e60f8db5SAlex Xie 	default:
499e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
500e60f8db5SAlex Xie 		break;
501e60f8db5SAlex Xie 	}
502e60f8db5SAlex Xie 
503e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
504e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
505e60f8db5SAlex Xie 
506e60f8db5SAlex Xie 	return pte_flag;
507e60f8db5SAlex Xie }
508e60f8db5SAlex Xie 
5093de676d8SChristian König static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
5103de676d8SChristian König 				uint64_t *addr, uint64_t *flags)
511f75e237cSChristian König {
5123de676d8SChristian König 	if (!(*flags & AMDGPU_PDE_PTE))
5133de676d8SChristian König 		*addr = adev->vm_manager.vram_base_offset + *addr -
514770d13b1SChristian König 			adev->gmc.vram_start;
5153de676d8SChristian König 	BUG_ON(*addr & 0xFFFF00000000003FULL);
5166a42fd6fSChristian König 
517770d13b1SChristian König 	if (!adev->gmc.translate_further)
5186a42fd6fSChristian König 		return;
5196a42fd6fSChristian König 
5206a42fd6fSChristian König 	if (level == AMDGPU_VM_PDB1) {
5216a42fd6fSChristian König 		/* Set the block fragment size */
5226a42fd6fSChristian König 		if (!(*flags & AMDGPU_PDE_PTE))
5236a42fd6fSChristian König 			*flags |= AMDGPU_PDE_BFS(0x9);
5246a42fd6fSChristian König 
5256a42fd6fSChristian König 	} else if (level == AMDGPU_VM_PDB0) {
5266a42fd6fSChristian König 		if (*flags & AMDGPU_PDE_PTE)
5276a42fd6fSChristian König 			*flags &= ~AMDGPU_PDE_PTE;
5286a42fd6fSChristian König 		else
5296a42fd6fSChristian König 			*flags |= AMDGPU_PTE_TF;
5306a42fd6fSChristian König 	}
531f75e237cSChristian König }
532f75e237cSChristian König 
/* GMC callback table wired into adev->gmc by gmc_v9_0_set_gmc_funcs(). */
static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
	.set_pte_pde = gmc_v9_0_set_pte_pde,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
541e60f8db5SAlex Xie 
542132f34e4SChristian König static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
543e60f8db5SAlex Xie {
544132f34e4SChristian König 	if (adev->gmc.gmc_funcs == NULL)
545132f34e4SChristian König 		adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
546e60f8db5SAlex Xie }
547e60f8db5SAlex Xie 
548e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
549e60f8db5SAlex Xie {
550e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
551e60f8db5SAlex Xie 
552132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
553e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
554e60f8db5SAlex Xie 
555770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
556770d13b1SChristian König 	adev->gmc.shared_aperture_end =
557770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
558bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
559770d13b1SChristian König 	adev->gmc.private_aperture_end =
560770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
561a7ea6548SAlex Deucher 
562e60f8db5SAlex Xie 	return 0;
563e60f8db5SAlex Xie }
564e60f8db5SAlex Xie 
56502bab923SDavid Panariti static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
56602bab923SDavid Panariti {
56702bab923SDavid Panariti 	uint32_t reg_val;
56802bab923SDavid Panariti 	uint32_t reg_addr;
56902bab923SDavid Panariti 	uint32_t field_val;
57002bab923SDavid Panariti 	size_t i;
57102bab923SDavid Panariti 	uint32_t fv2;
57202bab923SDavid Panariti 	size_t lost_sheep;
57302bab923SDavid Panariti 
57402bab923SDavid Panariti 	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
57502bab923SDavid Panariti 
57602bab923SDavid Panariti 	lost_sheep = 0;
57702bab923SDavid Panariti 	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
57802bab923SDavid Panariti 		reg_addr = ecc_umclocalcap_addrs[i];
57902bab923SDavid Panariti 		DRM_DEBUG("ecc: "
58002bab923SDavid Panariti 			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
58102bab923SDavid Panariti 			  i, reg_addr);
58202bab923SDavid Panariti 		reg_val = RREG32(reg_addr);
58302bab923SDavid Panariti 		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
58402bab923SDavid Panariti 					  EccDis);
58502bab923SDavid Panariti 		DRM_DEBUG("ecc: "
58602bab923SDavid Panariti 			  "reg_val: 0x%08x, "
58702bab923SDavid Panariti 			  "EccDis: 0x%08x, ",
58802bab923SDavid Panariti 			  reg_val, field_val);
58902bab923SDavid Panariti 		if (field_val) {
59002bab923SDavid Panariti 			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
59102bab923SDavid Panariti 			++lost_sheep;
59202bab923SDavid Panariti 		}
59302bab923SDavid Panariti 	}
59402bab923SDavid Panariti 
59502bab923SDavid Panariti 	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
59602bab923SDavid Panariti 		reg_addr = ecc_umcch_umc_config_addrs[i];
59702bab923SDavid Panariti 		DRM_DEBUG("ecc: "
59802bab923SDavid Panariti 			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
59902bab923SDavid Panariti 			  i, reg_addr);
60002bab923SDavid Panariti 		reg_val = RREG32(reg_addr);
60102bab923SDavid Panariti 		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
60202bab923SDavid Panariti 					  DramReady);
60302bab923SDavid Panariti 		DRM_DEBUG("ecc: "
60402bab923SDavid Panariti 			  "reg_val: 0x%08x, "
60502bab923SDavid Panariti 			  "DramReady: 0x%08x\n",
60602bab923SDavid Panariti 			  reg_val, field_val);
60702bab923SDavid Panariti 
60802bab923SDavid Panariti 		if (!field_val) {
60902bab923SDavid Panariti 			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
61002bab923SDavid Panariti 			++lost_sheep;
61102bab923SDavid Panariti 		}
61202bab923SDavid Panariti 	}
61302bab923SDavid Panariti 
61402bab923SDavid Panariti 	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
61502bab923SDavid Panariti 		reg_addr = ecc_umcch_eccctrl_addrs[i];
61602bab923SDavid Panariti 		DRM_DEBUG("ecc: "
61702bab923SDavid Panariti 			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
61802bab923SDavid Panariti 			  i, reg_addr);
61902bab923SDavid Panariti 		reg_val = RREG32(reg_addr);
62002bab923SDavid Panariti 		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
62102bab923SDavid Panariti 					  WrEccEn);
62202bab923SDavid Panariti 		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
62302bab923SDavid Panariti 				    RdEccEn);
62402bab923SDavid Panariti 		DRM_DEBUG("ecc: "
62502bab923SDavid Panariti 			  "reg_val: 0x%08x, "
62602bab923SDavid Panariti 			  "WrEccEn: 0x%08x, "
62702bab923SDavid Panariti 			  "RdEccEn: 0x%08x\n",
62802bab923SDavid Panariti 			  reg_val, field_val, fv2);
62902bab923SDavid Panariti 
63002bab923SDavid Panariti 		if (!field_val) {
6315a16008fSAlex Deucher 			DRM_DEBUG("ecc: WrEccEn is not set\n");
63202bab923SDavid Panariti 			++lost_sheep;
63302bab923SDavid Panariti 		}
63402bab923SDavid Panariti 		if (!fv2) {
6355a16008fSAlex Deucher 			DRM_DEBUG("ecc: RdEccEn is not set\n");
63602bab923SDavid Panariti 			++lost_sheep;
63702bab923SDavid Panariti 		}
63802bab923SDavid Panariti 	}
63902bab923SDavid Panariti 
64002bab923SDavid Panariti 	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
64102bab923SDavid Panariti 	return lost_sheep == 0;
64202bab923SDavid Panariti }
64302bab923SDavid Panariti 
644e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
645e60f8db5SAlex Xie {
646e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
647c5066129Sozeng 	/*
648c5066129Sozeng 	 * The latest engine allocation on gfx9 is:
649c5066129Sozeng 	 * Engine 0, 1: idle
650c5066129Sozeng 	 * Engine 2, 3: firmware
651c5066129Sozeng 	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
652c5066129Sozeng 	 * Engine 14~15: idle
653c5066129Sozeng 	 * Engine 16: kfd tlb invalidation
654c5066129Sozeng 	 * Engine 17: Gart flushes
655c5066129Sozeng 	 */
656c5066129Sozeng 	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
6574789c463SChristian König 	unsigned i;
65802bab923SDavid Panariti 	int r;
6594789c463SChristian König 
6606f752ec2SAndrey Grodzovsky 	/*
6616f752ec2SAndrey Grodzovsky 	 * TODO - Uncomment once GART corruption issue is fixed.
6626f752ec2SAndrey Grodzovsky 	 */
6636f752ec2SAndrey Grodzovsky 	/* amdgpu_bo_late_init(adev); */
6646f752ec2SAndrey Grodzovsky 
6654789c463SChristian König 	for(i = 0; i < adev->num_rings; ++i) {
6664789c463SChristian König 		struct amdgpu_ring *ring = adev->rings[i];
6674789c463SChristian König 		unsigned vmhub = ring->funcs->vmhub;
6684789c463SChristian König 
6694789c463SChristian König 		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
670775f55f1STom St Denis 		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
671775f55f1STom St Denis 			 ring->idx, ring->name, ring->vm_inv_eng,
672775f55f1STom St Denis 			 ring->funcs->vmhub);
6734789c463SChristian König 	}
6744789c463SChristian König 
675c5066129Sozeng 	/* Engine 16 is used for KFD and 17 for GART flushes */
6764789c463SChristian König 	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
677c5066129Sozeng 		BUG_ON(vm_inv_eng[i] > 16);
6784789c463SChristian König 
6797b6cbae2SMonk Liu 	if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
68002bab923SDavid Panariti 		r = gmc_v9_0_ecc_available(adev);
68102bab923SDavid Panariti 		if (r == 1) {
68202bab923SDavid Panariti 			DRM_INFO("ECC is active.\n");
68302bab923SDavid Panariti 		} else if (r == 0) {
68402bab923SDavid Panariti 			DRM_INFO("ECC is not present.\n");
685e1d1a772SAlex Deucher 			adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
68602bab923SDavid Panariti 		} else {
68702bab923SDavid Panariti 			DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
68802bab923SDavid Panariti 			return r;
68902bab923SDavid Panariti 		}
6905ba4fa35SAlex Deucher 	}
69102bab923SDavid Panariti 
692770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
693e60f8db5SAlex Xie }
694e60f8db5SAlex Xie 
695e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
696770d13b1SChristian König 					struct amdgpu_gmc *mc)
697e60f8db5SAlex Xie {
698eeb2487dSMonk Liu 	u64 base = 0;
699eeb2487dSMonk Liu 	if (!amdgpu_sriov_vf(adev))
700eeb2487dSMonk Liu 		base = mmhub_v1_0_get_fb_location(adev);
701770d13b1SChristian König 	amdgpu_device_vram_location(adev, &adev->gmc, base);
7022543e28aSAlex Deucher 	amdgpu_device_gart_location(adev, mc);
703bc099ee9SChunming Zhou 	/* base offset of vram pages */
704bc099ee9SChunming Zhou 	adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
705e60f8db5SAlex Xie }
706e60f8db5SAlex Xie 
707e60f8db5SAlex Xie /**
708e60f8db5SAlex Xie  * gmc_v9_0_mc_init - initialize the memory controller driver params
709e60f8db5SAlex Xie  *
710e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
711e60f8db5SAlex Xie  *
712e60f8db5SAlex Xie  * Look up the amount of vram, vram width, and decide how to place
713e60f8db5SAlex Xie  * vram and gart within the GPU's physical address space.
714e60f8db5SAlex Xie  * Returns 0 for success.
715e60f8db5SAlex Xie  */
716e60f8db5SAlex Xie static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
717e60f8db5SAlex Xie {
718e60f8db5SAlex Xie 	int chansize, numchan;
719d6895ad3SChristian König 	int r;
720e60f8db5SAlex Xie 
7213d918c0eSShaoyun Liu 	if (amdgpu_emu_mode != 1)
722770d13b1SChristian König 		adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
723770d13b1SChristian König 	if (!adev->gmc.vram_width) {
724e60f8db5SAlex Xie 		/* hbm memory channel size */
725585b7f16STom St Denis 		if (adev->flags & AMD_IS_APU)
726585b7f16STom St Denis 			chansize = 64;
727585b7f16STom St Denis 		else
728e60f8db5SAlex Xie 			chansize = 128;
729e60f8db5SAlex Xie 
730070706c0SHawking Zhang 		numchan = adev->df_funcs->get_hbm_channel_number(adev);
731770d13b1SChristian König 		adev->gmc.vram_width = numchan * chansize;
7328d6a5230SAlex Deucher 	}
733e60f8db5SAlex Xie 
734e60f8db5SAlex Xie 	/* size in MB on si */
735770d13b1SChristian König 	adev->gmc.mc_vram_size =
736bf383fb6SAlex Deucher 		adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL;
737770d13b1SChristian König 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
738d6895ad3SChristian König 
739d6895ad3SChristian König 	if (!(adev->flags & AMD_IS_APU)) {
740d6895ad3SChristian König 		r = amdgpu_device_resize_fb_bar(adev);
741d6895ad3SChristian König 		if (r)
742d6895ad3SChristian König 			return r;
743d6895ad3SChristian König 	}
744770d13b1SChristian König 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
745770d13b1SChristian König 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
746e60f8db5SAlex Xie 
747156a81beSChunming Zhou #ifdef CONFIG_X86_64
748156a81beSChunming Zhou 	if (adev->flags & AMD_IS_APU) {
749156a81beSChunming Zhou 		adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
750156a81beSChunming Zhou 		adev->gmc.aper_size = adev->gmc.real_vram_size;
751156a81beSChunming Zhou 	}
752156a81beSChunming Zhou #endif
753e60f8db5SAlex Xie 	/* In case the PCI BAR is larger than the actual amount of vram */
754770d13b1SChristian König 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
755770d13b1SChristian König 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
756770d13b1SChristian König 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
757e60f8db5SAlex Xie 
758c3db7b5aSAlex Deucher 	/* set the gart size */
759c3db7b5aSAlex Deucher 	if (amdgpu_gart_size == -1) {
760c3db7b5aSAlex Deucher 		switch (adev->asic_type) {
761c3db7b5aSAlex Deucher 		case CHIP_VEGA10:  /* all engines support GPUVM */
762273a14cdSAlex Deucher 		case CHIP_VEGA12:  /* all engines support GPUVM */
763d96b428cSFeifei Xu 		case CHIP_VEGA20:
764c3db7b5aSAlex Deucher 		default:
765fe19b862SMonk Liu 			adev->gmc.gart_size = 512ULL << 20;
766c3db7b5aSAlex Deucher 			break;
767c3db7b5aSAlex Deucher 		case CHIP_RAVEN:   /* DCE SG support */
768770d13b1SChristian König 			adev->gmc.gart_size = 1024ULL << 20;
769c3db7b5aSAlex Deucher 			break;
770c3db7b5aSAlex Deucher 		}
771c3db7b5aSAlex Deucher 	} else {
772770d13b1SChristian König 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
773c3db7b5aSAlex Deucher 	}
774c3db7b5aSAlex Deucher 
775770d13b1SChristian König 	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
776e60f8db5SAlex Xie 
777e60f8db5SAlex Xie 	return 0;
778e60f8db5SAlex Xie }
779e60f8db5SAlex Xie 
780e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
781e60f8db5SAlex Xie {
782e60f8db5SAlex Xie 	int r;
783e60f8db5SAlex Xie 
784e60f8db5SAlex Xie 	if (adev->gart.robj) {
785e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
786e60f8db5SAlex Xie 		return 0;
787e60f8db5SAlex Xie 	}
788e60f8db5SAlex Xie 	/* Initialize common gart structure */
789e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
790e60f8db5SAlex Xie 	if (r)
791e60f8db5SAlex Xie 		return r;
792e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
793e60f8db5SAlex Xie 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
794e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
795e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
796e60f8db5SAlex Xie }
797e60f8db5SAlex Xie 
798ebdef28eSAlex Deucher static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
799ebdef28eSAlex Deucher {
800ebdef28eSAlex Deucher #if 0
801ebdef28eSAlex Deucher 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
802ebdef28eSAlex Deucher #endif
803ebdef28eSAlex Deucher 	unsigned size;
804ebdef28eSAlex Deucher 
8056f752ec2SAndrey Grodzovsky 	/*
8066f752ec2SAndrey Grodzovsky 	 * TODO Remove once GART corruption is resolved
8076f752ec2SAndrey Grodzovsky 	 * Check related code in gmc_v9_0_sw_fini
8086f752ec2SAndrey Grodzovsky 	 * */
8096f752ec2SAndrey Grodzovsky 	size = 9 * 1024 * 1024;
8106f752ec2SAndrey Grodzovsky 
8116f752ec2SAndrey Grodzovsky #if 0
812ebdef28eSAlex Deucher 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
813ebdef28eSAlex Deucher 		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
814ebdef28eSAlex Deucher 	} else {
815ebdef28eSAlex Deucher 		u32 viewport;
816ebdef28eSAlex Deucher 
817ebdef28eSAlex Deucher 		switch (adev->asic_type) {
818ebdef28eSAlex Deucher 		case CHIP_RAVEN:
819ebdef28eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
820ebdef28eSAlex Deucher 			size = (REG_GET_FIELD(viewport,
821ebdef28eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
822ebdef28eSAlex Deucher 				REG_GET_FIELD(viewport,
823ebdef28eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
824ebdef28eSAlex Deucher 				4);
825ebdef28eSAlex Deucher 			break;
826ebdef28eSAlex Deucher 		case CHIP_VEGA10:
827ebdef28eSAlex Deucher 		case CHIP_VEGA12:
828ebdef28eSAlex Deucher 		default:
829ebdef28eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
830ebdef28eSAlex Deucher 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
831ebdef28eSAlex Deucher 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
832ebdef28eSAlex Deucher 				4);
833ebdef28eSAlex Deucher 			break;
834ebdef28eSAlex Deucher 		}
835ebdef28eSAlex Deucher 	}
836ebdef28eSAlex Deucher 	/* return 0 if the pre-OS buffer uses up most of vram */
837ebdef28eSAlex Deucher 	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
838ebdef28eSAlex Deucher 		return 0;
8396f752ec2SAndrey Grodzovsky 
8406f752ec2SAndrey Grodzovsky #endif
841ebdef28eSAlex Deucher 	return size;
842ebdef28eSAlex Deucher }
843ebdef28eSAlex Deucher 
844e60f8db5SAlex Xie static int gmc_v9_0_sw_init(void *handle)
845e60f8db5SAlex Xie {
846e60f8db5SAlex Xie 	int r;
847e60f8db5SAlex Xie 	int dma_bits;
848e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
849e60f8db5SAlex Xie 
8500c8c0847SHuang Rui 	gfxhub_v1_0_init(adev);
85177f6c763SHuang Rui 	mmhub_v1_0_init(adev);
8520c8c0847SHuang Rui 
853770d13b1SChristian König 	spin_lock_init(&adev->gmc.invalidate_lock);
854e60f8db5SAlex Xie 
8551e09b053SHawking Zhang 	adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev);
856fd66560bSHawking Zhang 	switch (adev->asic_type) {
857fd66560bSHawking Zhang 	case CHIP_RAVEN:
8586a42fd6fSChristian König 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
859f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
8606a42fd6fSChristian König 		} else {
8616a42fd6fSChristian König 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
8626a42fd6fSChristian König 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
863770d13b1SChristian König 			adev->gmc.translate_further =
8646a42fd6fSChristian König 				adev->vm_manager.num_level > 1;
8656a42fd6fSChristian König 		}
866fd66560bSHawking Zhang 		break;
867fd66560bSHawking Zhang 	case CHIP_VEGA10:
868273a14cdSAlex Deucher 	case CHIP_VEGA12:
869d96b428cSFeifei Xu 	case CHIP_VEGA20:
87036b32a68SZhang, Jerry 		/*
87136b32a68SZhang, Jerry 		 * To fulfill 4-level page support,
87236b32a68SZhang, Jerry 		 * vm size is 256TB (48bit), maximum size of Vega10,
87336b32a68SZhang, Jerry 		 * block size 512 (9bit)
87436b32a68SZhang, Jerry 		 */
875f3368128SChristian König 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
876fd66560bSHawking Zhang 		break;
877fd66560bSHawking Zhang 	default:
878fd66560bSHawking Zhang 		break;
879fd66560bSHawking Zhang 	}
880fd66560bSHawking Zhang 
881e60f8db5SAlex Xie 	/* This interrupt is VMC page fault.*/
88244a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
883770d13b1SChristian König 				&adev->gmc.vm_fault);
88444a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
885770d13b1SChristian König 				&adev->gmc.vm_fault);
886e60f8db5SAlex Xie 
887e60f8db5SAlex Xie 	if (r)
888e60f8db5SAlex Xie 		return r;
889e60f8db5SAlex Xie 
890e60f8db5SAlex Xie 	/* Set the internal MC address mask
891e60f8db5SAlex Xie 	 * This is the max address of the GPU's
892e60f8db5SAlex Xie 	 * internal address space.
893e60f8db5SAlex Xie 	 */
894770d13b1SChristian König 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
895e60f8db5SAlex Xie 
896e60f8db5SAlex Xie 	/* set DMA mask + need_dma32 flags.
897e60f8db5SAlex Xie 	 * PCIE - can handle 44-bits.
898e60f8db5SAlex Xie 	 * IGP - can handle 44-bits
899e60f8db5SAlex Xie 	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
900e60f8db5SAlex Xie 	 */
901e60f8db5SAlex Xie 	adev->need_dma32 = false;
902e60f8db5SAlex Xie 	dma_bits = adev->need_dma32 ? 32 : 44;
903e60f8db5SAlex Xie 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
904e60f8db5SAlex Xie 	if (r) {
905e60f8db5SAlex Xie 		adev->need_dma32 = true;
906e60f8db5SAlex Xie 		dma_bits = 32;
907e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
908e60f8db5SAlex Xie 	}
909e60f8db5SAlex Xie 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
910e60f8db5SAlex Xie 	if (r) {
911e60f8db5SAlex Xie 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
912e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
913e60f8db5SAlex Xie 	}
914fd5fd480SChunming Zhou 	adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
915e60f8db5SAlex Xie 
916e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
917e60f8db5SAlex Xie 	if (r)
918e60f8db5SAlex Xie 		return r;
919e60f8db5SAlex Xie 
920ebdef28eSAlex Deucher 	adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev);
921ebdef28eSAlex Deucher 
922e60f8db5SAlex Xie 	/* Memory manager */
923e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
924e60f8db5SAlex Xie 	if (r)
925e60f8db5SAlex Xie 		return r;
926e60f8db5SAlex Xie 
927e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
928e60f8db5SAlex Xie 	if (r)
929e60f8db5SAlex Xie 		return r;
930e60f8db5SAlex Xie 
93105ec3edaSChristian König 	/*
93205ec3edaSChristian König 	 * number of VMs
93305ec3edaSChristian König 	 * VMID 0 is reserved for System
93405ec3edaSChristian König 	 * amdgpu graphics/compute will use VMIDs 1-7
93505ec3edaSChristian König 	 * amdkfd will use VMIDs 8-15
93605ec3edaSChristian König 	 */
93705ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
93805ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
93905ec3edaSChristian König 
94005ec3edaSChristian König 	amdgpu_vm_manager_init(adev);
94105ec3edaSChristian König 
94205ec3edaSChristian König 	return 0;
943e60f8db5SAlex Xie }
944e60f8db5SAlex Xie 
945e60f8db5SAlex Xie /**
946c79ee7d8SMonk Liu  * gmc_v9_0_gart_fini - vm fini callback
947e60f8db5SAlex Xie  *
948e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
949e60f8db5SAlex Xie  *
950e60f8db5SAlex Xie  * Tears down the driver GART/VM setup (CIK).
951e60f8db5SAlex Xie  */
952e60f8db5SAlex Xie static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
953e60f8db5SAlex Xie {
954e60f8db5SAlex Xie 	amdgpu_gart_table_vram_free(adev);
955e60f8db5SAlex Xie 	amdgpu_gart_fini(adev);
956e60f8db5SAlex Xie }
957e60f8db5SAlex Xie 
958e60f8db5SAlex Xie static int gmc_v9_0_sw_fini(void *handle)
959e60f8db5SAlex Xie {
960e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
961e60f8db5SAlex Xie 
962f59548c8SMonk Liu 	amdgpu_gem_force_release(adev);
963e60f8db5SAlex Xie 	amdgpu_vm_manager_fini(adev);
964e60f8db5SAlex Xie 	gmc_v9_0_gart_fini(adev);
9656f752ec2SAndrey Grodzovsky 
9666f752ec2SAndrey Grodzovsky 	/*
9676f752ec2SAndrey Grodzovsky 	* TODO:
9686f752ec2SAndrey Grodzovsky 	* Currently there is a bug where some memory client outside
9696f752ec2SAndrey Grodzovsky 	* of the driver writes to first 8M of VRAM on S3 resume,
9706f752ec2SAndrey Grodzovsky 	* this overrides GART which by default gets placed in first 8M and
9716f752ec2SAndrey Grodzovsky 	* causes VM_FAULTS once GTT is accessed.
9726f752ec2SAndrey Grodzovsky 	* Keep the stolen memory reservation until the while this is not solved.
9736f752ec2SAndrey Grodzovsky 	* Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init
9746f752ec2SAndrey Grodzovsky 	*/
9756f752ec2SAndrey Grodzovsky 	amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
9766f752ec2SAndrey Grodzovsky 
977e60f8db5SAlex Xie 	amdgpu_bo_fini(adev);
978e60f8db5SAlex Xie 
979e60f8db5SAlex Xie 	return 0;
980e60f8db5SAlex Xie }
981e60f8db5SAlex Xie 
982e60f8db5SAlex Xie static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
983e60f8db5SAlex Xie {
984946a4d5bSShaoyun Liu 
985e60f8db5SAlex Xie 	switch (adev->asic_type) {
986e60f8db5SAlex Xie 	case CHIP_VEGA10:
987d96b428cSFeifei Xu 	case CHIP_VEGA20:
988946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
9895c583018SEvan Quan 						golden_settings_mmhub_1_0_0,
990c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
991946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
9925c583018SEvan Quan 						golden_settings_athub_1_0_0,
993c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
994e60f8db5SAlex Xie 		break;
995273a14cdSAlex Deucher 	case CHIP_VEGA12:
996273a14cdSAlex Deucher 		break;
997e4f3abaaSChunming Zhou 	case CHIP_RAVEN:
998946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
9995c583018SEvan Quan 						golden_settings_athub_1_0_0,
1000c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
1001e4f3abaaSChunming Zhou 		break;
1002e60f8db5SAlex Xie 	default:
1003e60f8db5SAlex Xie 		break;
1004e60f8db5SAlex Xie 	}
1005e60f8db5SAlex Xie }
1006e60f8db5SAlex Xie 
1007e60f8db5SAlex Xie /**
1008e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1009e60f8db5SAlex Xie  *
1010e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1011e60f8db5SAlex Xie  */
1012e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1013e60f8db5SAlex Xie {
1014e60f8db5SAlex Xie 	int r;
1015e60f8db5SAlex Xie 	bool value;
1016e60f8db5SAlex Xie 	u32 tmp;
1017e60f8db5SAlex Xie 
10189c3f2b54SAlex Deucher 	amdgpu_device_program_register_sequence(adev,
1019e60f8db5SAlex Xie 						golden_settings_vega10_hdp,
1020c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_vega10_hdp));
1021e60f8db5SAlex Xie 
1022e60f8db5SAlex Xie 	if (adev->gart.robj == NULL) {
1023e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1024e60f8db5SAlex Xie 		return -EINVAL;
1025e60f8db5SAlex Xie 	}
1026ce1b1b66SMonk Liu 	r = amdgpu_gart_table_vram_pin(adev);
1027ce1b1b66SMonk Liu 	if (r)
1028ce1b1b66SMonk Liu 		return r;
1029e60f8db5SAlex Xie 
10302fcd43ceSHawking Zhang 	switch (adev->asic_type) {
10312fcd43ceSHawking Zhang 	case CHIP_RAVEN:
10322fcd43ceSHawking Zhang 		mmhub_v1_0_initialize_power_gating(adev);
1033f8386b35SHawking Zhang 		mmhub_v1_0_update_power_gating(adev, true);
10342fcd43ceSHawking Zhang 		break;
10352fcd43ceSHawking Zhang 	default:
10362fcd43ceSHawking Zhang 		break;
10372fcd43ceSHawking Zhang 	}
10382fcd43ceSHawking Zhang 
1039e60f8db5SAlex Xie 	r = gfxhub_v1_0_gart_enable(adev);
1040e60f8db5SAlex Xie 	if (r)
1041e60f8db5SAlex Xie 		return r;
1042e60f8db5SAlex Xie 
1043e60f8db5SAlex Xie 	r = mmhub_v1_0_gart_enable(adev);
1044e60f8db5SAlex Xie 	if (r)
1045e60f8db5SAlex Xie 		return r;
1046e60f8db5SAlex Xie 
1047846347c9STom St Denis 	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
1048e60f8db5SAlex Xie 
1049b9509c80SHuang Rui 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
1050b9509c80SHuang Rui 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
1051e60f8db5SAlex Xie 
10521d4e0a8cSMonk Liu 	/* After HDP is initialized, flush HDP.*/
105369882565SChristian König 	adev->nbio_funcs->hdp_flush(adev, NULL);
10541d4e0a8cSMonk Liu 
1055e60f8db5SAlex Xie 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1056e60f8db5SAlex Xie 		value = false;
1057e60f8db5SAlex Xie 	else
1058e60f8db5SAlex Xie 		value = true;
1059e60f8db5SAlex Xie 
1060e60f8db5SAlex Xie 	gfxhub_v1_0_set_fault_enable_default(adev, value);
1061e60f8db5SAlex Xie 	mmhub_v1_0_set_fault_enable_default(adev, value);
1062132f34e4SChristian König 	gmc_v9_0_flush_gpu_tlb(adev, 0);
1063e60f8db5SAlex Xie 
1064e60f8db5SAlex Xie 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1065770d13b1SChristian König 		 (unsigned)(adev->gmc.gart_size >> 20),
1066e60f8db5SAlex Xie 		 (unsigned long long)adev->gart.table_addr);
1067e60f8db5SAlex Xie 	adev->gart.ready = true;
1068e60f8db5SAlex Xie 	return 0;
1069e60f8db5SAlex Xie }
1070e60f8db5SAlex Xie 
1071e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
1072e60f8db5SAlex Xie {
1073e60f8db5SAlex Xie 	int r;
1074e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1075e60f8db5SAlex Xie 
1076e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
1077e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
1078e60f8db5SAlex Xie 
1079edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
1080edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
10814d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1082edca2d05SAlex Deucher 
1083edca2d05SAlex Deucher 		/* disable VGA render */
10844d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1085edca2d05SAlex Deucher 	}
1086edca2d05SAlex Deucher 
1087e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1088e60f8db5SAlex Xie 
1089e60f8db5SAlex Xie 	return r;
1090e60f8db5SAlex Xie }
1091e60f8db5SAlex Xie 
1092e60f8db5SAlex Xie /**
1093e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1094e60f8db5SAlex Xie  *
1095e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1096e60f8db5SAlex Xie  *
1097e60f8db5SAlex Xie  * This disables all VM page table.
1098e60f8db5SAlex Xie  */
1099e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1100e60f8db5SAlex Xie {
1101e60f8db5SAlex Xie 	gfxhub_v1_0_gart_disable(adev);
1102e60f8db5SAlex Xie 	mmhub_v1_0_gart_disable(adev);
1103ce1b1b66SMonk Liu 	amdgpu_gart_table_vram_unpin(adev);
1104e60f8db5SAlex Xie }
1105e60f8db5SAlex Xie 
1106e60f8db5SAlex Xie static int gmc_v9_0_hw_fini(void *handle)
1107e60f8db5SAlex Xie {
1108e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1109e60f8db5SAlex Xie 
11105dd696aeSTrigger Huang 	if (amdgpu_sriov_vf(adev)) {
11115dd696aeSTrigger Huang 		/* full access mode, so don't touch any GMC register */
11125dd696aeSTrigger Huang 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
11135dd696aeSTrigger Huang 		return 0;
11145dd696aeSTrigger Huang 	}
11155dd696aeSTrigger Huang 
1116770d13b1SChristian König 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1117e60f8db5SAlex Xie 	gmc_v9_0_gart_disable(adev);
1118e60f8db5SAlex Xie 
1119e60f8db5SAlex Xie 	return 0;
1120e60f8db5SAlex Xie }
1121e60f8db5SAlex Xie 
1122e60f8db5SAlex Xie static int gmc_v9_0_suspend(void *handle)
1123e60f8db5SAlex Xie {
1124e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1125e60f8db5SAlex Xie 
1126f053cd47STom St Denis 	return gmc_v9_0_hw_fini(adev);
1127e60f8db5SAlex Xie }
1128e60f8db5SAlex Xie 
1129e60f8db5SAlex Xie static int gmc_v9_0_resume(void *handle)
1130e60f8db5SAlex Xie {
1131e60f8db5SAlex Xie 	int r;
1132e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1133e60f8db5SAlex Xie 
1134e60f8db5SAlex Xie 	r = gmc_v9_0_hw_init(adev);
1135e60f8db5SAlex Xie 	if (r)
1136e60f8db5SAlex Xie 		return r;
1137e60f8db5SAlex Xie 
1138620f774fSChristian König 	amdgpu_vmid_reset_all(adev);
1139e60f8db5SAlex Xie 
114032601d48SChristian König 	return 0;
1141e60f8db5SAlex Xie }
1142e60f8db5SAlex Xie 
1143e60f8db5SAlex Xie static bool gmc_v9_0_is_idle(void *handle)
1144e60f8db5SAlex Xie {
1145e60f8db5SAlex Xie 	/* MC is always ready in GMC v9.*/
1146e60f8db5SAlex Xie 	return true;
1147e60f8db5SAlex Xie }
1148e60f8db5SAlex Xie 
1149e60f8db5SAlex Xie static int gmc_v9_0_wait_for_idle(void *handle)
1150e60f8db5SAlex Xie {
1151e60f8db5SAlex Xie 	/* There is no need to wait for MC idle in GMC v9.*/
1152e60f8db5SAlex Xie 	return 0;
1153e60f8db5SAlex Xie }
1154e60f8db5SAlex Xie 
1155e60f8db5SAlex Xie static int gmc_v9_0_soft_reset(void *handle)
1156e60f8db5SAlex Xie {
1157e60f8db5SAlex Xie 	/* XXX for emulation.*/
1158e60f8db5SAlex Xie 	return 0;
1159e60f8db5SAlex Xie }
1160e60f8db5SAlex Xie 
1161e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1162e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1163e60f8db5SAlex Xie {
1164d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1165d5583d4fSHuang Rui 
1166d5583d4fSHuang Rui 	return mmhub_v1_0_set_clockgating(adev, state);
1167e60f8db5SAlex Xie }
1168e60f8db5SAlex Xie 
116913052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
117013052be5SHuang Rui {
117113052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
117213052be5SHuang Rui 
117313052be5SHuang Rui 	mmhub_v1_0_get_clockgating(adev, flags);
117413052be5SHuang Rui }
117513052be5SHuang Rui 
1176e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1177e60f8db5SAlex Xie 					enum amd_powergating_state state)
1178e60f8db5SAlex Xie {
1179e60f8db5SAlex Xie 	return 0;
1180e60f8db5SAlex Xie }
1181e60f8db5SAlex Xie 
1182e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1183e60f8db5SAlex Xie 	.name = "gmc_v9_0",
1184e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
1185e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
1186e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
1187e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
1188e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
1189e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
1190e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
1191e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
1192e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
1193e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
1194e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
1195e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
1196e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
119713052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
1198e60f8db5SAlex Xie };
1199e60f8db5SAlex Xie 
1200e60f8db5SAlex Xie const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1201e60f8db5SAlex Xie {
1202e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
1203e60f8db5SAlex Xie 	.major = 9,
1204e60f8db5SAlex Xie 	.minor = 0,
1205e60f8db5SAlex Xie 	.rev = 0,
1206e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
1207e60f8db5SAlex Xie };
1208