xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 65417d9f)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23e60f8db5SAlex Xie #include <linux/firmware.h>
24e60f8db5SAlex Xie #include "amdgpu.h"
25e60f8db5SAlex Xie #include "gmc_v9_0.h"
268d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
27e60f8db5SAlex Xie 
28e60f8db5SAlex Xie #include "vega10/soc15ip.h"
2975199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3075199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
31cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
32135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
33135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
34e60f8db5SAlex Xie #include "vega10/vega10_enum.h"
3565417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
366ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
37e60f8db5SAlex Xie 
38e60f8db5SAlex Xie #include "soc15_common.h"
3990c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
40e60f8db5SAlex Xie 
41e60f8db5SAlex Xie #include "nbio_v6_1.h"
42aecbe64fSChunming Zhou #include "nbio_v7_0.h"
43e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
44e60f8db5SAlex Xie #include "mmhub_v1_0.h"
45e60f8db5SAlex Xie 
46e60f8db5SAlex Xie #define mmDF_CS_AON0_DramBaseAddress0                                                                  0x0044
47e60f8db5SAlex Xie #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX                                                         0
48e60f8db5SAlex Xie //DF_CS_AON0_DramBaseAddress0
49e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT                                                        0x0
50e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT                                                    0x1
51e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT                                                      0x4
52e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT                                                      0x8
53e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT                                                      0xc
54e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK                                                          0x00000001L
55e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK                                                      0x00000002L
56e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK                                                        0x000000F0L
57e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK                                                        0x00000700L
58e60f8db5SAlex Xie #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK                                                        0xFFFFF000L
59e60f8db5SAlex Xie 
60e60f8db5SAlex Xie /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/
61e60f8db5SAlex Xie #define AMDGPU_NUM_OF_VMIDS			8
62e60f8db5SAlex Xie 
/* Vega10 HDP golden register settings.
 * Flat array of { register offset, AND mask, OR value } triples consumed
 * by the SOC15 golden-register programming helpers: each target register
 * is read, masked, OR-ed and written back.  Here offsets 0xf64..0xf6e
 * have their low 28 bits cleared.
 * NOTE(review): offset 0xf69 is absent from the run — presumably
 * intentional; confirm against the HDP 4.0 register map.
 */
static const u32 golden_settings_vega10_hdp[] =
{
	0xf64, 0x0fffffff, 0x00000000,
	0xf65, 0x0fffffff, 0x00000000,
	0xf66, 0x0fffffff, 0x00000000,
	0xf67, 0x0fffffff, 0x00000000,
	0xf68, 0x0fffffff, 0x00000000,
	0xf6a, 0x0fffffff, 0x00000000,
	0xf6b, 0x0fffffff, 0x00000000,
	0xf6c, 0x0fffffff, 0x00000000,
	0xf6d, 0x0fffffff, 0x00000000,
	0xf6e, 0x0fffffff, 0x00000000,
};
76e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden settings: { register, AND mask, OR value } triples
 * resolved through SOC15_REG_OFFSET at compile time. */
static const u32 golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
	SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
};
825c583018SEvan Quan 
/* ATHUB 1.0.0 golden settings: { register, AND mask, OR value } triples
 * for the read/write path arbiter control registers. */
static const u32 golden_settings_athub_1_0_0[] =
{
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
};
885c583018SEvan Quan 
8902bab923SDavid Panariti /* Ecc related register addresses, (BASE + reg offset) */
9002bab923SDavid Panariti /* Universal Memory Controller caps (may be fused). */
9102bab923SDavid Panariti /* UMCCH:UmcLocalCap */
9202bab923SDavid Panariti #define UMCLOCALCAPS_ADDR0	(0x00014306 + 0x00000000)
9302bab923SDavid Panariti #define UMCLOCALCAPS_ADDR1	(0x00014306 + 0x00000800)
9402bab923SDavid Panariti #define UMCLOCALCAPS_ADDR2	(0x00014306 + 0x00001000)
9502bab923SDavid Panariti #define UMCLOCALCAPS_ADDR3	(0x00014306 + 0x00001800)
9602bab923SDavid Panariti #define UMCLOCALCAPS_ADDR4	(0x00054306 + 0x00000000)
9702bab923SDavid Panariti #define UMCLOCALCAPS_ADDR5	(0x00054306 + 0x00000800)
9802bab923SDavid Panariti #define UMCLOCALCAPS_ADDR6	(0x00054306 + 0x00001000)
9902bab923SDavid Panariti #define UMCLOCALCAPS_ADDR7	(0x00054306 + 0x00001800)
10002bab923SDavid Panariti #define UMCLOCALCAPS_ADDR8	(0x00094306 + 0x00000000)
10102bab923SDavid Panariti #define UMCLOCALCAPS_ADDR9	(0x00094306 + 0x00000800)
10202bab923SDavid Panariti #define UMCLOCALCAPS_ADDR10	(0x00094306 + 0x00001000)
10302bab923SDavid Panariti #define UMCLOCALCAPS_ADDR11	(0x00094306 + 0x00001800)
10402bab923SDavid Panariti #define UMCLOCALCAPS_ADDR12	(0x000d4306 + 0x00000000)
10502bab923SDavid Panariti #define UMCLOCALCAPS_ADDR13	(0x000d4306 + 0x00000800)
10602bab923SDavid Panariti #define UMCLOCALCAPS_ADDR14	(0x000d4306 + 0x00001000)
10702bab923SDavid Panariti #define UMCLOCALCAPS_ADDR15	(0x000d4306 + 0x00001800)
10802bab923SDavid Panariti 
10902bab923SDavid Panariti /* Universal Memory Controller Channel config. */
11002bab923SDavid Panariti /* UMCCH:UMC_CONFIG */
11102bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR0	(0x00014040 + 0x00000000)
11202bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR1	(0x00014040 + 0x00000800)
11302bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR2	(0x00014040 + 0x00001000)
11402bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR3	(0x00014040 + 0x00001800)
11502bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR4	(0x00054040 + 0x00000000)
11602bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR5	(0x00054040 + 0x00000800)
11702bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR6	(0x00054040 + 0x00001000)
11802bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR7	(0x00054040 + 0x00001800)
11902bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR8	(0x00094040 + 0x00000000)
12002bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR9	(0x00094040 + 0x00000800)
12102bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR10	(0x00094040 + 0x00001000)
12202bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR11	(0x00094040 + 0x00001800)
12302bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR12	(0x000d4040 + 0x00000000)
12402bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR13	(0x000d4040 + 0x00000800)
12502bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR14	(0x000d4040 + 0x00001000)
12602bab923SDavid Panariti #define UMCCH_UMC_CONFIG_ADDR15	(0x000d4040 + 0x00001800)
12702bab923SDavid Panariti 
12802bab923SDavid Panariti /* Universal Memory Controller Channel Ecc config. */
12902bab923SDavid Panariti /* UMCCH:EccCtrl */
13002bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR0	(0x00014053 + 0x00000000)
13102bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR1	(0x00014053 + 0x00000800)
13202bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR2	(0x00014053 + 0x00001000)
13302bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR3	(0x00014053 + 0x00001800)
13402bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR4	(0x00054053 + 0x00000000)
13502bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR5	(0x00054053 + 0x00000800)
13602bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR6	(0x00054053 + 0x00001000)
13702bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR7	(0x00054053 + 0x00001800)
13802bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR8	(0x00094053 + 0x00000000)
13902bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR9	(0x00094053 + 0x00000800)
14002bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR10	(0x00094053 + 0x00001000)
14102bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR11	(0x00094053 + 0x00001800)
14202bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR12	(0x000d4053 + 0x00000000)
14302bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR13	(0x000d4053 + 0x00000800)
14402bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR14	(0x000d4053 + 0x00001000)
14502bab923SDavid Panariti #define UMCCH_ECCCTRL_ADDR15	(0x000d4053 + 0x00001800)
14602bab923SDavid Panariti 
/* Per-channel UMCCH:UmcLocalCap register addresses (16 UMC channels),
 * scanned by gmc_v9_0_ecc_available() to detect fuse-disabled ECC. */
static const uint32_t ecc_umclocalcap_addrs[] = {
	UMCLOCALCAPS_ADDR0,
	UMCLOCALCAPS_ADDR1,
	UMCLOCALCAPS_ADDR2,
	UMCLOCALCAPS_ADDR3,
	UMCLOCALCAPS_ADDR4,
	UMCLOCALCAPS_ADDR5,
	UMCLOCALCAPS_ADDR6,
	UMCLOCALCAPS_ADDR7,
	UMCLOCALCAPS_ADDR8,
	UMCLOCALCAPS_ADDR9,
	UMCLOCALCAPS_ADDR10,
	UMCLOCALCAPS_ADDR11,
	UMCLOCALCAPS_ADDR12,
	UMCLOCALCAPS_ADDR13,
	UMCLOCALCAPS_ADDR14,
	UMCLOCALCAPS_ADDR15,
};
16502bab923SDavid Panariti 
/* Per-channel UMCCH:UMC_CONFIG register addresses (16 UMC channels),
 * scanned by gmc_v9_0_ecc_available() for the DramReady bit. */
static const uint32_t ecc_umcch_umc_config_addrs[] = {
	UMCCH_UMC_CONFIG_ADDR0,
	UMCCH_UMC_CONFIG_ADDR1,
	UMCCH_UMC_CONFIG_ADDR2,
	UMCCH_UMC_CONFIG_ADDR3,
	UMCCH_UMC_CONFIG_ADDR4,
	UMCCH_UMC_CONFIG_ADDR5,
	UMCCH_UMC_CONFIG_ADDR6,
	UMCCH_UMC_CONFIG_ADDR7,
	UMCCH_UMC_CONFIG_ADDR8,
	UMCCH_UMC_CONFIG_ADDR9,
	UMCCH_UMC_CONFIG_ADDR10,
	UMCCH_UMC_CONFIG_ADDR11,
	UMCCH_UMC_CONFIG_ADDR12,
	UMCCH_UMC_CONFIG_ADDR13,
	UMCCH_UMC_CONFIG_ADDR14,
	UMCCH_UMC_CONFIG_ADDR15,
};
18402bab923SDavid Panariti 
/* Per-channel UMCCH:EccCtrl register addresses (16 UMC channels),
 * scanned by gmc_v9_0_ecc_available() for WrEccEn/RdEccEn. */
static const uint32_t ecc_umcch_eccctrl_addrs[] = {
	UMCCH_ECCCTRL_ADDR0,
	UMCCH_ECCCTRL_ADDR1,
	UMCCH_ECCCTRL_ADDR2,
	UMCCH_ECCCTRL_ADDR3,
	UMCCH_ECCCTRL_ADDR4,
	UMCCH_ECCCTRL_ADDR5,
	UMCCH_ECCCTRL_ADDR6,
	UMCCH_ECCCTRL_ADDR7,
	UMCCH_ECCCTRL_ADDR8,
	UMCCH_ECCCTRL_ADDR9,
	UMCCH_ECCCTRL_ADDR10,
	UMCCH_ECCCTRL_ADDR11,
	UMCCH_ECCCTRL_ADDR12,
	UMCCH_ECCCTRL_ADDR13,
	UMCCH_ECCCTRL_ADDR14,
	UMCCH_ECCCTRL_ADDR15,
};
20302bab923SDavid Panariti 
204e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
205e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
206e60f8db5SAlex Xie 					unsigned type,
207e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
208e60f8db5SAlex Xie {
209e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
210ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
211e60f8db5SAlex Xie 
21211250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21311250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21411250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21511250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21611250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21711250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
21811250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
21911250164SChristian König 
220e60f8db5SAlex Xie 	switch (state) {
221e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
222ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
223ae6d1416STom St Denis 			hub = &adev->vmhub[j];
224e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
225e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
226e60f8db5SAlex Xie 				tmp = RREG32(reg);
227e60f8db5SAlex Xie 				tmp &= ~bits;
228e60f8db5SAlex Xie 				WREG32(reg, tmp);
229e60f8db5SAlex Xie 			}
230e60f8db5SAlex Xie 		}
231e60f8db5SAlex Xie 		break;
232e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
233ae6d1416STom St Denis 		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
234ae6d1416STom St Denis 			hub = &adev->vmhub[j];
235e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
236e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
237e60f8db5SAlex Xie 				tmp = RREG32(reg);
238e60f8db5SAlex Xie 				tmp |= bits;
239e60f8db5SAlex Xie 				WREG32(reg, tmp);
240e60f8db5SAlex Xie 			}
241e60f8db5SAlex Xie 		}
242e60f8db5SAlex Xie 	default:
243e60f8db5SAlex Xie 		break;
244e60f8db5SAlex Xie 	}
245e60f8db5SAlex Xie 
246e60f8db5SAlex Xie 	return 0;
247e60f8db5SAlex Xie }
248e60f8db5SAlex Xie 
/**
 * gmc_v9_0_process_interrupt - handle a VM protection fault interrupt
 *
 * @adev: amdgpu device pointer
 * @source: interrupt source (unused)
 * @entry: decoded interrupt vector entry
 *
 * Reconstructs the faulting GPU virtual address from the IV ring entry,
 * reads and clears the hub's L2 protection-fault status (skipped under
 * SR-IOV, where the VF may not touch these registers), and logs the
 * fault with rate limiting.  Always returns 0.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	/* vm_id_src selects the hub the fault came from: 0 = gfxhub,
	 * non-zero = mmhub (see the log message below). */
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
	uint32_t status = 0;
	u64 addr;

	/* Faulting page address: src_data[0] carries the page number
	 * (address bits above 12), src_data[1] low nibble carries
	 * bits 47:44. */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/* Read status first, then write 1 to bit 0 of the fault
		 * control register to clear it for the next fault. */
		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	/* Rate-limited so a fault storm does not flood the log. */
	if (printk_ratelimit()) {
		dev_err(adev->dev,
			"[%s] VMC page fault (src_id:%u ring:%u vm_id:%u pas_id:%u)\n",
			entry->vm_id_src ? "mmhub" : "gfxhub",
			entry->src_id, entry->ring_id, entry->vm_id,
			entry->pas_id);
		dev_err(adev->dev, "  at page 0x%016llx from %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev))
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
	}

	return 0;
}
281e60f8db5SAlex Xie 
/* IRQ source callbacks for VM fault interrupts. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
286e60f8db5SAlex Xie 
/* Register the VM fault IRQ callbacks with the MC interrupt source. */
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
292e60f8db5SAlex Xie 
/**
 * gmc_v9_0_get_invalidate_req - build a VM_INVALIDATE_ENG*_REQ value
 *
 * @vm_id: VMID whose translations should be invalidated
 *
 * Returns the request word for a legacy (flush type 0) invalidation of
 * the given VMID that flushes all L1 and L2 PTE/PDE cache levels without
 * clearing the protection fault status address.
 */
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
{
	u32 req = 0;

	/* invalidate using legacy mode on vm_id*/
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vm_id);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);

	return req;
}
31103f89febSChristian König 
312e60f8db5SAlex Xie /*
313e60f8db5SAlex Xie  * GART
314e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
315e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
316e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
317e60f8db5SAlex Xie  */
318e60f8db5SAlex Xie 
319e60f8db5SAlex Xie /**
320e60f8db5SAlex Xie  * gmc_v9_0_gart_flush_gpu_tlb - gart tlb flush callback
321e60f8db5SAlex Xie  *
322e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
323e60f8db5SAlex Xie  * @vmid: vm instance to flush
324e60f8db5SAlex Xie  *
325e60f8db5SAlex Xie  * Flush the TLB for the requested page table.
326e60f8db5SAlex Xie  */
static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned i, j;

	/* flush hdp cache first so the TLB refills see current PTEs;
	 * APUs use the NBIO 7.0 path, dGPUs NBIO 6.1 */
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	/* Serialize use of the shared invalidation engine. */
	spin_lock(&adev->mc.invalidate_lock);

	/* Kick off a legacy invalidate on every hub and wait for the
	 * per-VMID ack bit on each. */
	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmhub *hub = &adev->vmhub[i];
		u32 tmp = gmc_v9_0_get_invalidate_req(vmid);

		WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);

		/* Busy wait for ACK: fast path, up to 100 polls with no
		 * delay since most flushes complete almost immediately. */
		for (j = 0; j < 100; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;	/* ack bit for this VMID */
			if (tmp)
				break;
			cpu_relax();
		}
		if (j < 100)
			continue;	/* acked, next hub */

		/* Slow path: keep polling with a 1us delay per iteration
		 * up to the device timeout. */
		for (j = 0; j < adev->usec_timeout; j++) {
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
			tmp &= 1 << vmid;
			if (tmp)
				break;
			udelay(1);
		}
		if (j < adev->usec_timeout)
			continue;

		/* Timed out; log but keep going for the remaining hubs. */
		DRM_ERROR("Timeout waiting for VM flush ACK!\n");
	}

	spin_unlock(&adev->mc.invalidate_lock);
}
375e60f8db5SAlex Xie 
376e60f8db5SAlex Xie /**
377e60f8db5SAlex Xie  * gmc_v9_0_gart_set_pte_pde - update the page tables using MMIO
378e60f8db5SAlex Xie  *
379e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
380e60f8db5SAlex Xie  * @cpu_pt_addr: cpu address of the page table
381e60f8db5SAlex Xie  * @gpu_page_idx: entry in the page table to update
382e60f8db5SAlex Xie  * @addr: dst addr to write into pte/pde
383e60f8db5SAlex Xie  * @flags: access flags
384e60f8db5SAlex Xie  *
385e60f8db5SAlex Xie  * Update the page tables using the CPU.
386e60f8db5SAlex Xie  */
387e60f8db5SAlex Xie static int gmc_v9_0_gart_set_pte_pde(struct amdgpu_device *adev,
388e60f8db5SAlex Xie 					void *cpu_pt_addr,
389e60f8db5SAlex Xie 					uint32_t gpu_page_idx,
390e60f8db5SAlex Xie 					uint64_t addr,
391e60f8db5SAlex Xie 					uint64_t flags)
392e60f8db5SAlex Xie {
393e60f8db5SAlex Xie 	void __iomem *ptr = (void *)cpu_pt_addr;
394e60f8db5SAlex Xie 	uint64_t value;
395e60f8db5SAlex Xie 
396e60f8db5SAlex Xie 	/*
397e60f8db5SAlex Xie 	 * PTE format on VEGA 10:
398e60f8db5SAlex Xie 	 * 63:59 reserved
399e60f8db5SAlex Xie 	 * 58:57 mtype
400e60f8db5SAlex Xie 	 * 56 F
401e60f8db5SAlex Xie 	 * 55 L
402e60f8db5SAlex Xie 	 * 54 P
403e60f8db5SAlex Xie 	 * 53 SW
404e60f8db5SAlex Xie 	 * 52 T
405e60f8db5SAlex Xie 	 * 50:48 reserved
406e60f8db5SAlex Xie 	 * 47:12 4k physical page base address
407e60f8db5SAlex Xie 	 * 11:7 fragment
408e60f8db5SAlex Xie 	 * 6 write
409e60f8db5SAlex Xie 	 * 5 read
410e60f8db5SAlex Xie 	 * 4 exe
411e60f8db5SAlex Xie 	 * 3 Z
412e60f8db5SAlex Xie 	 * 2 snooped
413e60f8db5SAlex Xie 	 * 1 system
414e60f8db5SAlex Xie 	 * 0 valid
415e60f8db5SAlex Xie 	 *
416e60f8db5SAlex Xie 	 * PDE format on VEGA 10:
417e60f8db5SAlex Xie 	 * 63:59 block fragment size
418e60f8db5SAlex Xie 	 * 58:55 reserved
419e60f8db5SAlex Xie 	 * 54 P
420e60f8db5SAlex Xie 	 * 53:48 reserved
421e60f8db5SAlex Xie 	 * 47:6 physical base address of PD or PTE
422e60f8db5SAlex Xie 	 * 5:3 reserved
423e60f8db5SAlex Xie 	 * 2 C
424e60f8db5SAlex Xie 	 * 1 system
425e60f8db5SAlex Xie 	 * 0 valid
426e60f8db5SAlex Xie 	 */
427e60f8db5SAlex Xie 
428e60f8db5SAlex Xie 	/*
429e60f8db5SAlex Xie 	 * The following is for PTE only. GART does not have PDEs.
430e60f8db5SAlex Xie 	*/
431e60f8db5SAlex Xie 	value = addr & 0x0000FFFFFFFFF000ULL;
432e60f8db5SAlex Xie 	value |= flags;
433e60f8db5SAlex Xie 	writeq(value, ptr + (gpu_page_idx * 8));
434e60f8db5SAlex Xie 	return 0;
435e60f8db5SAlex Xie }
436e60f8db5SAlex Xie 
437e60f8db5SAlex Xie static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
438e60f8db5SAlex Xie 						uint32_t flags)
439e60f8db5SAlex Xie 
440e60f8db5SAlex Xie {
441e60f8db5SAlex Xie 	uint64_t pte_flag = 0;
442e60f8db5SAlex Xie 
443e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
444e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_EXECUTABLE;
445e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_READABLE)
446e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_READABLE;
447e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
448e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_WRITEABLE;
449e60f8db5SAlex Xie 
450e60f8db5SAlex Xie 	switch (flags & AMDGPU_VM_MTYPE_MASK) {
451e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
452e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
453e60f8db5SAlex Xie 		break;
454e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
455e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
456e60f8db5SAlex Xie 		break;
457e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
458e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_WC);
459e60f8db5SAlex Xie 		break;
460e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
461e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_CC);
462e60f8db5SAlex Xie 		break;
463e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
464e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_UC);
465e60f8db5SAlex Xie 		break;
466e60f8db5SAlex Xie 	default:
467e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_MTYPE(MTYPE_NC);
468e60f8db5SAlex Xie 		break;
469e60f8db5SAlex Xie 	}
470e60f8db5SAlex Xie 
471e60f8db5SAlex Xie 	if (flags & AMDGPU_VM_PAGE_PRT)
472e60f8db5SAlex Xie 		pte_flag |= AMDGPU_PTE_PRT;
473e60f8db5SAlex Xie 
474e60f8db5SAlex Xie 	return pte_flag;
475e60f8db5SAlex Xie }
476e60f8db5SAlex Xie 
/* Convert a VRAM offset into the physical address a PDE must carry.
 * The BUG_ON mask asserts the result fits the PDE's physical-base field
 * (bits 47:6, per the PDE layout documented above) and is 64-byte
 * aligned — any bit outside 47:6 trips it. */
static u64 gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, u64 addr)
{
	addr = adev->vm_manager.vram_base_offset + addr - adev->mc.vram_start;
	BUG_ON(addr & 0xFFFF00000000003FULL);
	return addr;
}
483f75e237cSChristian König 
/* GART callbacks exported to the core amdgpu GMC code. */
static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v9_0_gart_set_pte_pde,
	.get_invalidate_req = gmc_v9_0_get_invalidate_req,
	.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
	.get_vm_pde = gmc_v9_0_get_vm_pde
};
491e60f8db5SAlex Xie 
492e60f8db5SAlex Xie static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
493e60f8db5SAlex Xie {
494e60f8db5SAlex Xie 	if (adev->gart.gart_funcs == NULL)
495e60f8db5SAlex Xie 		adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
496e60f8db5SAlex Xie }
497e60f8db5SAlex Xie 
/* IP-block early init: wire up the GART and IRQ callback tables before
 * any hardware access.  Always returns 0. */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v9_0_set_gart_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);

	return 0;
}
507e60f8db5SAlex Xie 
/**
 * gmc_v9_0_ecc_available - probe whether ECC is usable on this board
 *
 * @adev: amdgpu device pointer
 *
 * Walks all 16 UMC channels and checks three conditions per channel:
 * ECC is not fuse-disabled (UmcLocalCap.EccDis clear), DRAM training
 * finished (UMC_CONFIG.DramReady set), and both write and read ECC are
 * enabled (EccCtrl.WrEccEn / RdEccEn set).  Every failed check bumps
 * the "lost_sheep" counter.
 *
 * Returns 1 when every check passed (ECC fully available), 0 otherwise.
 */
static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
{
	uint32_t reg_val;
	uint32_t reg_addr;
	uint32_t field_val;
	size_t i;
	uint32_t fv2;
	size_t lost_sheep;	/* count of failed per-channel checks */

	DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");

	lost_sheep = 0;
	/* Pass 1: any channel with ECC fuse-disabled disqualifies ECC. */
	for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
		reg_addr = ecc_umclocalcap_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
					  EccDis);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "EccDis: 0x%08x, ",
			  reg_val, field_val);
		if (field_val) {
			DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
			++lost_sheep;
		}
	}

	/* Pass 2: every channel's DRAM must have finished training. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
		reg_addr = ecc_umcch_umc_config_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
					  DramReady);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "DramReady: 0x%08x\n",
			  reg_val, field_val);

		if (!field_val) {
			DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
			++lost_sheep;
		}
	}

	/* Pass 3: both write-path and read-path ECC must be enabled. */
	for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
		reg_addr = ecc_umcch_eccctrl_addrs[i];
		DRM_DEBUG("ecc: "
			  "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
			  i, reg_addr);
		reg_val = RREG32(reg_addr);
		field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
					  WrEccEn);
		fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
				    RdEccEn);
		DRM_DEBUG("ecc: "
			  "reg_val: 0x%08x, "
			  "WrEccEn: 0x%08x, "
			  "RdEccEn: 0x%08x\n",
			  reg_val, field_val, fv2);

		/* Disabled ECC here is expected on non-ECC boards, so
		 * only debug-log it (unlike the ERROR paths above). */
		if (!field_val) {
			DRM_DEBUG("ecc: WrEccEn is not set\n");
			++lost_sheep;
		}
		if (!fv2) {
			DRM_DEBUG("ecc: RdEccEn is not set\n");
			++lost_sheep;
		}
	}

	DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
	return lost_sheep == 0;
}
58602bab923SDavid Panariti 
/* IP-block late init: assign a TLB-invalidation engine to every ring,
 * report ECC availability, and enable the VM fault interrupt.
 * Returns 0 on success or a negative error code from amdgpu_irq_get(). */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	/*
	 * The latest engine allocation on gfx9 is:
	 * Engine 0, 1: idle
	 * Engine 2, 3: firmware
	 * Engine 4~13: amdgpu ring, subject to change when ring number changes
	 * Engine 14~15: idle
	 * Engine 16: kfd tlb invalidation
	 * Engine 17: Gart flushes
	 */
	/* Next free invalidation engine per hub; rings start at engine 4. */
	unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
	unsigned i;
	int r;

	/* Hand out engines to rings in order, per hub. */
	for(i = 0; i < adev->num_rings; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		unsigned vmhub = ring->funcs->vmhub;

		ring->vm_inv_eng = vm_inv_eng[vmhub]++;
		dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n",
			 ring->idx, ring->name, ring->vm_inv_eng,
			 ring->funcs->vmhub);
	}

	/* Engine 16 is used for KFD and 17 for GART flushes */
	for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
		BUG_ON(vm_inv_eng[i] > 16);

	/* NOTE(review): gmc_v9_0_ecc_available() as implemented only
	 * returns 0 or 1, so the error branch below is currently
	 * unreachable — kept for forward compatibility, presumably. */
	r = gmc_v9_0_ecc_available(adev);
	if (r == 1) {
		DRM_INFO("ECC is active.\n");
	} else if (r == 0) {
		DRM_INFO("ECC is not present.\n");
	} else {
		DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
		return r;
	}

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
629e60f8db5SAlex Xie 
630e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
631e60f8db5SAlex Xie 					struct amdgpu_mc *mc)
632e60f8db5SAlex Xie {
633eeb2487dSMonk Liu 	u64 base = 0;
634eeb2487dSMonk Liu 	if (!amdgpu_sriov_vf(adev))
635eeb2487dSMonk Liu 		base = mmhub_v1_0_get_fb_location(adev);
636e60f8db5SAlex Xie 	amdgpu_vram_location(adev, &adev->mc, base);
6376f02a696SChristian König 	amdgpu_gart_location(adev, mc);
638bc099ee9SChunming Zhou 	/* base offset of vram pages */
639bc099ee9SChunming Zhou 	if (adev->flags & AMD_IS_APU)
640bc099ee9SChunming Zhou 		adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
641bc099ee9SChunming Zhou 	else
642bc099ee9SChunming Zhou 		adev->vm_manager.vram_base_offset = 0;
643e60f8db5SAlex Xie }
644e60f8db5SAlex Xie 
645e60f8db5SAlex Xie /**
646e60f8db5SAlex Xie  * gmc_v9_0_mc_init - initialize the memory controller driver params
647e60f8db5SAlex Xie  *
648e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
649e60f8db5SAlex Xie  *
650e60f8db5SAlex Xie  * Look up the amount of vram, vram width, and decide how to place
651e60f8db5SAlex Xie  * vram and gart within the GPU's physical address space.
652e60f8db5SAlex Xie  * Returns 0 for success.
653e60f8db5SAlex Xie  */
654e60f8db5SAlex Xie static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
655e60f8db5SAlex Xie {
656e60f8db5SAlex Xie 	u32 tmp;
657e60f8db5SAlex Xie 	int chansize, numchan;
658d6895ad3SChristian König 	int r;
659e60f8db5SAlex Xie 
6608d6a5230SAlex Deucher 	adev->mc.vram_width = amdgpu_atomfirmware_get_vram_width(adev);
6618d6a5230SAlex Deucher 	if (!adev->mc.vram_width) {
662e60f8db5SAlex Xie 		/* hbm memory channel size */
663e60f8db5SAlex Xie 		chansize = 128;
664e60f8db5SAlex Xie 
665b9509c80SHuang Rui 		tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0);
666e60f8db5SAlex Xie 		tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK;
667e60f8db5SAlex Xie 		tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
668e60f8db5SAlex Xie 		switch (tmp) {
669e60f8db5SAlex Xie 		case 0:
670e60f8db5SAlex Xie 		default:
671e60f8db5SAlex Xie 			numchan = 1;
672e60f8db5SAlex Xie 			break;
673e60f8db5SAlex Xie 		case 1:
674e60f8db5SAlex Xie 			numchan = 2;
675e60f8db5SAlex Xie 			break;
676e60f8db5SAlex Xie 		case 2:
677e60f8db5SAlex Xie 			numchan = 0;
678e60f8db5SAlex Xie 			break;
679e60f8db5SAlex Xie 		case 3:
680e60f8db5SAlex Xie 			numchan = 4;
681e60f8db5SAlex Xie 			break;
682e60f8db5SAlex Xie 		case 4:
683e60f8db5SAlex Xie 			numchan = 0;
684e60f8db5SAlex Xie 			break;
685e60f8db5SAlex Xie 		case 5:
686e60f8db5SAlex Xie 			numchan = 8;
687e60f8db5SAlex Xie 			break;
688e60f8db5SAlex Xie 		case 6:
689e60f8db5SAlex Xie 			numchan = 0;
690e60f8db5SAlex Xie 			break;
691e60f8db5SAlex Xie 		case 7:
692e60f8db5SAlex Xie 			numchan = 16;
693e60f8db5SAlex Xie 			break;
694e60f8db5SAlex Xie 		case 8:
695e60f8db5SAlex Xie 			numchan = 2;
696e60f8db5SAlex Xie 			break;
697e60f8db5SAlex Xie 		}
698e60f8db5SAlex Xie 		adev->mc.vram_width = numchan * chansize;
6998d6a5230SAlex Deucher 	}
700e60f8db5SAlex Xie 
701e60f8db5SAlex Xie 	/* size in MB on si */
702e60f8db5SAlex Xie 	adev->mc.mc_vram_size =
703aecbe64fSChunming Zhou 		((adev->flags & AMD_IS_APU) ? nbio_v7_0_get_memsize(adev) :
704aecbe64fSChunming Zhou 		 nbio_v6_1_get_memsize(adev)) * 1024ULL * 1024ULL;
705e60f8db5SAlex Xie 	adev->mc.real_vram_size = adev->mc.mc_vram_size;
706d6895ad3SChristian König 
707d6895ad3SChristian König 	if (!(adev->flags & AMD_IS_APU)) {
708d6895ad3SChristian König 		r = amdgpu_device_resize_fb_bar(adev);
709d6895ad3SChristian König 		if (r)
710d6895ad3SChristian König 			return r;
711d6895ad3SChristian König 	}
712d6895ad3SChristian König 	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
713d6895ad3SChristian König 	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
714e60f8db5SAlex Xie 
715e60f8db5SAlex Xie 	/* In case the PCI BAR is larger than the actual amount of vram */
716d6895ad3SChristian König 	adev->mc.visible_vram_size = adev->mc.aper_size;
717e60f8db5SAlex Xie 	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
718e60f8db5SAlex Xie 		adev->mc.visible_vram_size = adev->mc.real_vram_size;
719e60f8db5SAlex Xie 
720c3db7b5aSAlex Deucher 	/* set the gart size */
721c3db7b5aSAlex Deucher 	if (amdgpu_gart_size == -1) {
722c3db7b5aSAlex Deucher 		switch (adev->asic_type) {
723c3db7b5aSAlex Deucher 		case CHIP_VEGA10:  /* all engines support GPUVM */
724c3db7b5aSAlex Deucher 		default:
725c3db7b5aSAlex Deucher 			adev->mc.gart_size = 256ULL << 20;
726c3db7b5aSAlex Deucher 			break;
727c3db7b5aSAlex Deucher 		case CHIP_RAVEN:   /* DCE SG support */
728c3db7b5aSAlex Deucher 			adev->mc.gart_size = 1024ULL << 20;
729c3db7b5aSAlex Deucher 			break;
730c3db7b5aSAlex Deucher 		}
731c3db7b5aSAlex Deucher 	} else {
732c3db7b5aSAlex Deucher 		adev->mc.gart_size = (u64)amdgpu_gart_size << 20;
733c3db7b5aSAlex Deucher 	}
734c3db7b5aSAlex Deucher 
735e60f8db5SAlex Xie 	gmc_v9_0_vram_gtt_location(adev, &adev->mc);
736e60f8db5SAlex Xie 
737e60f8db5SAlex Xie 	return 0;
738e60f8db5SAlex Xie }
739e60f8db5SAlex Xie 
740e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
741e60f8db5SAlex Xie {
742e60f8db5SAlex Xie 	int r;
743e60f8db5SAlex Xie 
744e60f8db5SAlex Xie 	if (adev->gart.robj) {
745e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
746e60f8db5SAlex Xie 		return 0;
747e60f8db5SAlex Xie 	}
748e60f8db5SAlex Xie 	/* Initialize common gart structure */
749e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
750e60f8db5SAlex Xie 	if (r)
751e60f8db5SAlex Xie 		return r;
752e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
753e60f8db5SAlex Xie 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE(MTYPE_UC) |
754e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
755e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
756e60f8db5SAlex Xie }
757e60f8db5SAlex Xie 
758e60f8db5SAlex Xie static int gmc_v9_0_sw_init(void *handle)
759e60f8db5SAlex Xie {
760e60f8db5SAlex Xie 	int r;
761e60f8db5SAlex Xie 	int dma_bits;
762e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
763e60f8db5SAlex Xie 
7640c8c0847SHuang Rui 	gfxhub_v1_0_init(adev);
76577f6c763SHuang Rui 	mmhub_v1_0_init(adev);
7660c8c0847SHuang Rui 
767e60f8db5SAlex Xie 	spin_lock_init(&adev->mc.invalidate_lock);
768e60f8db5SAlex Xie 
769fd66560bSHawking Zhang 	switch (adev->asic_type) {
770fd66560bSHawking Zhang 	case CHIP_RAVEN:
771e60f8db5SAlex Xie 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
772fd66560bSHawking Zhang 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
773fdd5faaaSChristian König 			adev->vm_manager.max_pfn = 1ULL << 36;
774fd66560bSHawking Zhang 			adev->vm_manager.block_size = 9;
775fd66560bSHawking Zhang 			adev->vm_manager.num_level = 3;
776d07f14beSRoger He 			amdgpu_vm_set_fragment_size(adev, 9);
777e60f8db5SAlex Xie 		} else {
778fd66560bSHawking Zhang 			/* vm_size is 64GB for legacy 2-level page support */
779d07f14beSRoger He 			amdgpu_vm_adjust_size(adev, 64, 9);
780fd66560bSHawking Zhang 			adev->vm_manager.num_level = 1;
781fd66560bSHawking Zhang 		}
782fd66560bSHawking Zhang 		break;
783fd66560bSHawking Zhang 	case CHIP_VEGA10:
784e60f8db5SAlex Xie 		/* XXX Don't know how to get VRAM type yet. */
785e60f8db5SAlex Xie 		adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
78636b32a68SZhang, Jerry 		/*
78736b32a68SZhang, Jerry 		 * To fulfill 4-level page support,
78836b32a68SZhang, Jerry 		 * vm size is 256TB (48bit), maximum size of Vega10,
78936b32a68SZhang, Jerry 		 * block size 512 (9bit)
79036b32a68SZhang, Jerry 		 */
791fdd5faaaSChristian König 		adev->vm_manager.max_pfn = 1ULL << 36;
79236b32a68SZhang, Jerry 		adev->vm_manager.block_size = 9;
793fd66560bSHawking Zhang 		adev->vm_manager.num_level = 3;
794d07f14beSRoger He 		amdgpu_vm_set_fragment_size(adev, 9);
795fd66560bSHawking Zhang 		break;
796fd66560bSHawking Zhang 	default:
797fd66560bSHawking Zhang 		break;
798fd66560bSHawking Zhang 	}
799fd66560bSHawking Zhang 
800e618d306SRoger He 	DRM_INFO("vm size is %llu GB, block size is %u-bit, fragment size is %u-bit\n",
801fdd5faaaSChristian König 		 adev->vm_manager.max_pfn >> 18, adev->vm_manager.block_size,
802e618d306SRoger He 		 adev->vm_manager.fragment_size);
803e60f8db5SAlex Xie 
804e60f8db5SAlex Xie 	/* This interrupt is VMC page fault.*/
805e60f8db5SAlex Xie 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_VMC, 0,
806e60f8db5SAlex Xie 				&adev->mc.vm_fault);
807d7c434d3SFelix Kuehling 	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_UTCL2, 0,
808d7c434d3SFelix Kuehling 				&adev->mc.vm_fault);
809e60f8db5SAlex Xie 
810e60f8db5SAlex Xie 	if (r)
811e60f8db5SAlex Xie 		return r;
812e60f8db5SAlex Xie 
813e60f8db5SAlex Xie 	/* Set the internal MC address mask
814e60f8db5SAlex Xie 	 * This is the max address of the GPU's
815e60f8db5SAlex Xie 	 * internal address space.
816e60f8db5SAlex Xie 	 */
817e60f8db5SAlex Xie 	adev->mc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
818e60f8db5SAlex Xie 
819916910adSHuang Rui 	/*
820916910adSHuang Rui 	 * It needs to reserve 8M stolen memory for vega10
821916910adSHuang Rui 	 * TODO: Figure out how to avoid that...
822916910adSHuang Rui 	 */
823916910adSHuang Rui 	adev->mc.stolen_size = 8 * 1024 * 1024;
824916910adSHuang Rui 
825e60f8db5SAlex Xie 	/* set DMA mask + need_dma32 flags.
826e60f8db5SAlex Xie 	 * PCIE - can handle 44-bits.
827e60f8db5SAlex Xie 	 * IGP - can handle 44-bits
828e60f8db5SAlex Xie 	 * PCI - dma32 for legacy pci gart, 44 bits on vega10
829e60f8db5SAlex Xie 	 */
830e60f8db5SAlex Xie 	adev->need_dma32 = false;
831e60f8db5SAlex Xie 	dma_bits = adev->need_dma32 ? 32 : 44;
832e60f8db5SAlex Xie 	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
833e60f8db5SAlex Xie 	if (r) {
834e60f8db5SAlex Xie 		adev->need_dma32 = true;
835e60f8db5SAlex Xie 		dma_bits = 32;
836e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
837e60f8db5SAlex Xie 	}
838e60f8db5SAlex Xie 	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
839e60f8db5SAlex Xie 	if (r) {
840e60f8db5SAlex Xie 		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
841e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
842e60f8db5SAlex Xie 	}
843e60f8db5SAlex Xie 
844e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
845e60f8db5SAlex Xie 	if (r)
846e60f8db5SAlex Xie 		return r;
847e60f8db5SAlex Xie 
848e60f8db5SAlex Xie 	/* Memory manager */
849e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
850e60f8db5SAlex Xie 	if (r)
851e60f8db5SAlex Xie 		return r;
852e60f8db5SAlex Xie 
853e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
854e60f8db5SAlex Xie 	if (r)
855e60f8db5SAlex Xie 		return r;
856e60f8db5SAlex Xie 
85705ec3edaSChristian König 	/*
85805ec3edaSChristian König 	 * number of VMs
85905ec3edaSChristian König 	 * VMID 0 is reserved for System
86005ec3edaSChristian König 	 * amdgpu graphics/compute will use VMIDs 1-7
86105ec3edaSChristian König 	 * amdkfd will use VMIDs 8-15
86205ec3edaSChristian König 	 */
86305ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_GFXHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
86405ec3edaSChristian König 	adev->vm_manager.id_mgr[AMDGPU_MMHUB].num_ids = AMDGPU_NUM_OF_VMIDS;
86505ec3edaSChristian König 
86605ec3edaSChristian König 	amdgpu_vm_manager_init(adev);
86705ec3edaSChristian König 
86805ec3edaSChristian König 	return 0;
869e60f8db5SAlex Xie }
870e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_fini - gart fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART setup: frees the page table VRAM and the
 * common gart structure.
 */
static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
883e60f8db5SAlex Xie 
/*
 * gmc_v9_0_sw_fini - software fini callback, mirroring sw_init in reverse:
 * VM manager, GART, then the BO backend (after force-releasing GEM objects).
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	gmc_v9_0_gart_fini(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
895e60f8db5SAlex Xie 
896e60f8db5SAlex Xie static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
897e60f8db5SAlex Xie {
898e60f8db5SAlex Xie 	switch (adev->asic_type) {
899e60f8db5SAlex Xie 	case CHIP_VEGA10:
9005c583018SEvan Quan 		amdgpu_program_register_sequence(adev,
9015c583018SEvan Quan 						golden_settings_mmhub_1_0_0,
902c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
9035c583018SEvan Quan 		amdgpu_program_register_sequence(adev,
9045c583018SEvan Quan 						golden_settings_athub_1_0_0,
905c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
906e60f8db5SAlex Xie 		break;
907e4f3abaaSChunming Zhou 	case CHIP_RAVEN:
9085c583018SEvan Quan 		amdgpu_program_register_sequence(adev,
9095c583018SEvan Quan 						golden_settings_athub_1_0_0,
910c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
911e4f3abaaSChunming Zhou 		break;
912e60f8db5SAlex Xie 	default:
913e60f8db5SAlex Xie 		break;
914e60f8db5SAlex Xie 	}
915e60f8db5SAlex Xie }
916e60f8db5SAlex Xie 
917e60f8db5SAlex Xie /**
918e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
919e60f8db5SAlex Xie  *
920e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
921e60f8db5SAlex Xie  */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	amdgpu_program_register_sequence(adev,
		golden_settings_vega10_hdp,
		ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* Pin the page table in VRAM so it cannot move while the hubs use it. */
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	/* Raven: set up MMHUB powergating before enabling the hubs. */
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		mmhub_v1_0_initialize_power_gating(adev);
		mmhub_v1_0_update_power_gating(adev, true);
		break;
	default:
		break;
	}

	/* Enable GART on both the GFX and MM hubs. */
	r = gfxhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v1_0_gart_enable(adev);
	if (r)
		return r;

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* Read HDP_HOST_PATH_CNTL back and rewrite it unchanged — presumably
	 * the write itself latches the setting; confirm against the HDP
	 * programming guide. */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* After HDP is initialized, flush HDP.*/
	if (adev->flags & AMD_IS_APU)
		nbio_v7_0_hdp_flush(adev);
	else
		nbio_v6_1_hdp_flush(adev);

	/* VM faults halt the faulting client by default, unless the user
	 * requested AMDGPU_VM_FAULT_STOP_ALWAYS behavior. */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_0_set_fault_enable_default(adev, value);
	mmhub_v1_0_set_fault_enable_default(adev, value);
	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gart_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
983e60f8db5SAlex Xie 
984e60f8db5SAlex Xie static int gmc_v9_0_hw_init(void *handle)
985e60f8db5SAlex Xie {
986e60f8db5SAlex Xie 	int r;
987e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
988e60f8db5SAlex Xie 
989e60f8db5SAlex Xie 	/* The sequence of these two function calls matters.*/
990e60f8db5SAlex Xie 	gmc_v9_0_init_golden_registers(adev);
991e60f8db5SAlex Xie 
992edca2d05SAlex Deucher 	if (adev->mode_info.num_crtc) {
993edca2d05SAlex Deucher 		/* Lockout access through VGA aperture*/
9944d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
995edca2d05SAlex Deucher 
996edca2d05SAlex Deucher 		/* disable VGA render */
9974d9c333aSTom St Denis 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
998edca2d05SAlex Deucher 	}
999edca2d05SAlex Deucher 
1000e60f8db5SAlex Xie 	r = gmc_v9_0_gart_enable(adev);
1001e60f8db5SAlex Xie 
1002e60f8db5SAlex Xie 	return r;
1003e60f8db5SAlex Xie }
1004e60f8db5SAlex Xie 
1005e60f8db5SAlex Xie /**
1006e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1007e60f8db5SAlex Xie  *
1008e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1009e60f8db5SAlex Xie  *
1010e60f8db5SAlex Xie  * This disables all VM page table.
1011e60f8db5SAlex Xie  */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	/* Disable both hubs before unpinning the page table they reference. */
	gfxhub_v1_0_gart_disable(adev);
	mmhub_v1_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
1018e60f8db5SAlex Xie 
/*
 * gmc_v9_0_hw_fini - hardware fini callback: drop the VM fault interrupt
 * and disable the GART. No-op under SR-IOV, where the host owns the GMC.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/* Stop taking page-fault interrupts before tearing the GART down. */
	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1034e60f8db5SAlex Xie 
/* Suspend is simply a full hardware teardown for GMC v9. */
static int gmc_v9_0_suspend(void *handle)
{
	return gmc_v9_0_hw_fini(handle);
}
1041e60f8db5SAlex Xie 
/*
 * gmc_v9_0_resume - re-run hardware init and invalidate all cached VMIDs,
 * which are stale after a suspend/resume cycle.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	int r = gmc_v9_0_hw_init(adev);

	if (r)
		return r;

	amdgpu_vm_reset_all_ids(adev);

	return 0;
}
1055e60f8db5SAlex Xie 
/* The GMC v9 memory controller is always ready; report idle unconditionally. */
static bool gmc_v9_0_is_idle(void *handle)
{
	return true;
}
1061e60f8db5SAlex Xie 
/* Nothing to wait for: the MC never reports busy on GMC v9. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	return 0;
}
1067e60f8db5SAlex Xie 
/* XXX stub for emulation: no soft reset implemented for GMC v9. */
static int gmc_v9_0_soft_reset(void *handle)
{
	return 0;
}
1073e60f8db5SAlex Xie 
1074e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1075e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1076e60f8db5SAlex Xie {
1077d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1078d5583d4fSHuang Rui 
1079d5583d4fSHuang Rui 	return mmhub_v1_0_set_clockgating(adev, state);
1080e60f8db5SAlex Xie }
1081e60f8db5SAlex Xie 
108213052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
108313052be5SHuang Rui {
108413052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
108513052be5SHuang Rui 
108613052be5SHuang Rui 	mmhub_v1_0_get_clockgating(adev, flags);
108713052be5SHuang Rui }
108813052be5SHuang Rui 
1089e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1090e60f8db5SAlex Xie 					enum amd_powergating_state state)
1091e60f8db5SAlex Xie {
1092e60f8db5SAlex Xie 	return 0;
1093e60f8db5SAlex Xie }
1094e60f8db5SAlex Xie 
1095e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1096e60f8db5SAlex Xie 	.name = "gmc_v9_0",
1097e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
1098e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
1099e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
1100e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
1101e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
1102e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
1103e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
1104e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
1105e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
1106e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
1107e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
1108e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
1109e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
111013052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
1111e60f8db5SAlex Xie };
1112e60f8db5SAlex Xie 
/* IP block registration record for GMC v9.0.0. */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1121