xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 3de60d96)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
34cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
35135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
36135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
37fb960bd2SFeifei Xu #include "vega10_enum.h"
3865417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
39ea930000SAlex Sierra #include "athub/athub_1_0_sh_mask.h"
406ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
41250b4228SChristian König #include "oss/osssys_4_0_offset.h"
42e60f8db5SAlex Xie 
43946a4d5bSShaoyun Liu #include "soc15.h"
44ea930000SAlex Sierra #include "soc15d.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5251cce480SLe Ma #include "mmhub_v9_4.h"
5385e39550SLe Ma #include "mmhub_v1_7.h"
545b6b35aaSHawking Zhang #include "umc_v6_1.h"
55e7da754bSMonk Liu #include "umc_v6_0.h"
56e60f8db5SAlex Xie 
5744a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5844a99b65SAndrey Grodzovsky 
59791c4769Sxinhui pan #include "amdgpu_ras.h"
60029fbd43SHawking Zhang #include "amdgpu_xgmi.h"
61791c4769Sxinhui pan 
62ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
63ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
64ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
65ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
66ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
67ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
68ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
69f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
70f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2
71f8646661SAlex Deucher 
72ebdef28eSAlex Deucher 
/*
 * Human-readable names for GFX hub UTCL2 client IDs, indexed by the CID
 * field of VM_L2_PROTECTION_FAULT_STATUS; used when decoding page faults.
 */
static const char *gfxhub_client_ids[] = {
	"CB",
	"DB",
	"IA",
	"WD",
	"CPF",
	"CPC",
	"CPG",
	"RLC",
	"TCP",
	"SQC (inst)",
	"SQC (data)",
	"SQG",
	"PA",
};
88be99ecbfSAlex Deucher 
/*
 * MMHUB client ID -> name table for Raven, indexed as [cid][rw] where
 * both fields come from VM_L2_PROTECTION_FAULT_STATUS (rw is presumably
 * 0 = read fault, 1 = write fault — per the RW status field usage below).
 * Unlisted IDs decode as NULL ("unknown") in the fault handler.
 */
static const char *mmhub_client_ids_raven[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "VCN",
	[3][0] = "VCNU",
	[4][0] = "HDP",
	[5][0] = "DCE",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "VCN",
	[3][1] = "VCNU",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCE",
	[8][1] = "DCEDWB0",
	[9][1] = "DCEDWB1",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
};
11302f23f5fSAlex Deucher 
/* MMHUB client ID -> name table for Renoir, indexed as [cid][rw]. */
static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};
14102f23f5fSAlex Deucher 
/* MMHUB client ID -> name table for Vega10, indexed as [cid][rw]. */
static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};
17302f23f5fSAlex Deucher 
/* MMHUB client ID -> name table for Vega12, indexed as [cid][rw]. */
static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};
20502f23f5fSAlex Deucher 
/* MMHUB client ID -> name table for Vega20, indexed as [cid][rw]. */
static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};
24102f23f5fSAlex Deucher 
/* MMHUB client ID -> name table for Arcturus, indexed as [cid][rw]. */
static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0] = "DBGU1",
	[1][0] = "XDP",
	[2][0] = "MP1",
	[14][0] = "HDP",
	[171][0] = "JPEG",
	[172][0] = "VCN",
	[173][0] = "VCNU",
	[203][0] = "JPEG1",
	[204][0] = "VCN1",
	[205][0] = "VCN1U",
	[256][0] = "SDMA0",
	[257][0] = "SDMA1",
	[258][0] = "SDMA2",
	[259][0] = "SDMA3",
	[260][0] = "SDMA4",
	[261][0] = "SDMA5",
	[262][0] = "SDMA6",
	[263][0] = "SDMA7",
	[384][0] = "OSS",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[14][1] = "HDP",
	[171][1] = "JPEG",
	[172][1] = "VCN",
	[173][1] = "VCNU",
	[203][1] = "JPEG1",
	[204][1] = "VCN1",
	[205][1] = "VCN1U",
	[256][1] = "SDMA0",
	[257][1] = "SDMA1",
	[258][1] = "SDMA2",
	[259][1] = "SDMA3",
	[260][1] = "SDMA4",
	[261][1] = "SDMA5",
	[262][1] = "SDMA6",
	[263][1] = "SDMA7",
	[384][1] = "OSS",
};
282e60f8db5SAlex Xie 
/* Golden (recommended power-on) register settings for MMHUB 1.0.0. */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
2885c583018SEvan Quan 
/* Golden (recommended power-on) register settings for ATHUB 1.0.0. */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
2945c583018SEvan Quan 
/*
 * Raw register addresses for the UMC ECC interrupt control registers,
 * written by gmc_v9_0_ecc_interrupt_state().  Laid out as 8 base
 * addresses x 4 fixed offsets (0x0/0x800/0x1000/0x1800) — presumably one
 * entry per UMC instance/channel pair; verify against the UMC spec.
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	(0x000143c0 + 0x00000000),
	(0x000143c0 + 0x00000800),
	(0x000143c0 + 0x00001000),
	(0x000143c0 + 0x00001800),
	(0x000543c0 + 0x00000000),
	(0x000543c0 + 0x00000800),
	(0x000543c0 + 0x00001000),
	(0x000543c0 + 0x00001800),
	(0x000943c0 + 0x00000000),
	(0x000943c0 + 0x00000800),
	(0x000943c0 + 0x00001000),
	(0x000943c0 + 0x00001800),
	(0x000d43c0 + 0x00000000),
	(0x000d43c0 + 0x00000800),
	(0x000d43c0 + 0x00001000),
	(0x000d43c0 + 0x00001800),
	(0x001143c0 + 0x00000000),
	(0x001143c0 + 0x00000800),
	(0x001143c0 + 0x00001000),
	(0x001143c0 + 0x00001800),
	(0x001543c0 + 0x00000000),
	(0x001543c0 + 0x00000800),
	(0x001543c0 + 0x00001000),
	(0x001543c0 + 0x00001800),
	(0x001943c0 + 0x00000000),
	(0x001943c0 + 0x00000800),
	(0x001943c0 + 0x00001000),
	(0x001943c0 + 0x00001800),
	(0x001d43c0 + 0x00000000),
	(0x001d43c0 + 0x00000800),
	(0x001d43c0 + 0x00001000),
	(0x001d43c0 + 0x00001800),
};
32902bab923SDavid Panariti 
/*
 * Companion mask-register addresses to ecc_umc_mcumc_ctrl_addrs (same
 * 8x4 layout, base + 0x20); written with the same bits in
 * gmc_v9_0_ecc_interrupt_state().
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	(0x000143e0 + 0x00000000),
	(0x000143e0 + 0x00000800),
	(0x000143e0 + 0x00001000),
	(0x000143e0 + 0x00001800),
	(0x000543e0 + 0x00000000),
	(0x000543e0 + 0x00000800),
	(0x000543e0 + 0x00001000),
	(0x000543e0 + 0x00001800),
	(0x000943e0 + 0x00000000),
	(0x000943e0 + 0x00000800),
	(0x000943e0 + 0x00001000),
	(0x000943e0 + 0x00001800),
	(0x000d43e0 + 0x00000000),
	(0x000d43e0 + 0x00000800),
	(0x000d43e0 + 0x00001000),
	(0x000d43e0 + 0x00001800),
	(0x001143e0 + 0x00000000),
	(0x001143e0 + 0x00000800),
	(0x001143e0 + 0x00001000),
	(0x001143e0 + 0x00001800),
	(0x001543e0 + 0x00000000),
	(0x001543e0 + 0x00000800),
	(0x001543e0 + 0x00001000),
	(0x001543e0 + 0x00001800),
	(0x001943e0 + 0x00000000),
	(0x001943e0 + 0x00000800),
	(0x001943e0 + 0x00001000),
	(0x001943e0 + 0x00001800),
	(0x001d43e0 + 0x00000000),
	(0x001d43e0 + 0x00000800),
	(0x001d43e0 + 0x00001000),
	(0x001d43e0 + 0x00001800),
};
36402bab923SDavid Panariti 
365791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
366791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
367791c4769Sxinhui pan 		unsigned type,
368791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
369791c4769Sxinhui pan {
370791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
371791c4769Sxinhui pan 
3721e2c6d55SJohn Clements 	/* Devices newer then VEGA10/12 shall have these programming
3731e2c6d55SJohn Clements 	     sequences performed by PSP BL */
3741e2c6d55SJohn Clements 	if (adev->asic_type >= CHIP_VEGA20)
3751e2c6d55SJohn Clements 		return 0;
3761e2c6d55SJohn Clements 
377791c4769Sxinhui pan 	bits = 0x7f;
378791c4769Sxinhui pan 
379791c4769Sxinhui pan 	switch (state) {
380791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
381791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
382791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
383791c4769Sxinhui pan 			tmp = RREG32(reg);
384791c4769Sxinhui pan 			tmp &= ~bits;
385791c4769Sxinhui pan 			WREG32(reg, tmp);
386791c4769Sxinhui pan 		}
387791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
388791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
389791c4769Sxinhui pan 			tmp = RREG32(reg);
390791c4769Sxinhui pan 			tmp &= ~bits;
391791c4769Sxinhui pan 			WREG32(reg, tmp);
392791c4769Sxinhui pan 		}
393791c4769Sxinhui pan 		break;
394791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
395791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
396791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
397791c4769Sxinhui pan 			tmp = RREG32(reg);
398791c4769Sxinhui pan 			tmp |= bits;
399791c4769Sxinhui pan 			WREG32(reg, tmp);
400791c4769Sxinhui pan 		}
401791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
402791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
403791c4769Sxinhui pan 			tmp = RREG32(reg);
404791c4769Sxinhui pan 			tmp |= bits;
405791c4769Sxinhui pan 			WREG32(reg, tmp);
406791c4769Sxinhui pan 		}
407791c4769Sxinhui pan 		break;
408791c4769Sxinhui pan 	default:
409791c4769Sxinhui pan 		break;
410791c4769Sxinhui pan 	}
411791c4769Sxinhui pan 
412791c4769Sxinhui pan 	return 0;
413791c4769Sxinhui pan }
414791c4769Sxinhui pan 
415e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
416e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
417e60f8db5SAlex Xie 					unsigned type,
418e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
419e60f8db5SAlex Xie {
420e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
421ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
422e60f8db5SAlex Xie 
42311250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
42411250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
42511250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
42611250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
42711250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
42811250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
42911250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
43011250164SChristian König 
431e60f8db5SAlex Xie 	switch (state) {
432e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
4331daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
434ae6d1416STom St Denis 			hub = &adev->vmhub[j];
435e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
436e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
437e60f8db5SAlex Xie 				tmp = RREG32(reg);
438e60f8db5SAlex Xie 				tmp &= ~bits;
439e60f8db5SAlex Xie 				WREG32(reg, tmp);
440e60f8db5SAlex Xie 			}
441e60f8db5SAlex Xie 		}
442e60f8db5SAlex Xie 		break;
443e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
4441daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
445ae6d1416STom St Denis 			hub = &adev->vmhub[j];
446e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
447e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
448e60f8db5SAlex Xie 				tmp = RREG32(reg);
449e60f8db5SAlex Xie 				tmp |= bits;
450e60f8db5SAlex Xie 				WREG32(reg, tmp);
451e60f8db5SAlex Xie 			}
452e60f8db5SAlex Xie 		}
4539304ca4dSGustavo A. R. Silva 		break;
454e60f8db5SAlex Xie 	default:
455e60f8db5SAlex Xie 		break;
456e60f8db5SAlex Xie 	}
457e60f8db5SAlex Xie 
458e60f8db5SAlex Xie 	return 0;
459e60f8db5SAlex Xie }
460e60f8db5SAlex Xie 
/*
 * gmc_v9_0_process_interrupt - handle a GPU VM page-fault interrupt.
 *
 * Decodes the faulting address from the IV entry, filters/delegates
 * retry faults (possibly resolving them by filling page tables), and
 * otherwise rate-limited-logs the fault details, including the decoded
 * UTCL2 client name for the gfxhub or the per-ASIC mmhub table.
 *
 * Returns 1 when a retry fault was consumed (which also prevents the IV
 * from reaching the KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	/* Bit 7 of src_data[1] marks this as a retry fault. */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0, cid = 0, rw = 0;
	struct amdgpu_task_info task_info;
	struct amdgpu_vmhub *hub;
	const char *mmhub_cid;
	const char *hub_name;
	u64 addr;

	/* Reassemble the 48-bit page address: low 32 bits of the page
	 * number in src_data[0], top nibble in src_data[1].
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (retry_fault) {
		/* Returning 1 here also prevents sending the IV to the KFD */

		/* Process it only if it's the first fault for this address */
		if (entry->ih != &adev->irq.ih_soft &&
		    amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
					     entry->timestamp))
			return 1;

		/* Delegate it to a different ring if the hardware hasn't
		 * already done it.
		 */
		if (in_interrupt()) {
			amdgpu_irq_delegate(adev, entry, 8);
			return 1;
		}

		/* Try to handle the recoverable page faults by filling page
		 * tables
		 */
		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
			return 1;
	}

	/* Everything below is diagnostics only; rate-limit it. */
	if (!printk_ratelimit())
		return 0;

	/* Pick the hub the fault came from based on the IH client. */
	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		hub_name = "mmhub0";
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		hub_name = "mmhub1";
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		hub_name = "gfxhub0";
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

	dev_err(adev->dev,
		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
		hub_name, retry_fault ? "retry" : "no-retry",
		entry->src_id, entry->ring_id, entry->vmid,
		entry->pasid, task_info.process_name, task_info.tgid,
		task_info.task_name, task_info.pid);
	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
		addr, entry->client_id,
		soc15_ih_clientid_name[entry->client_id]);

	/* Under SR-IOV the fault status registers are not ours to poke. */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/*
	 * Issue a dummy read to wait for the status register to
	 * be updated to avoid reading an incorrect value due to
	 * the new fast GRBM interface.
	 */
	if (entry->vmid_src == AMDGPU_GFXHUB_0)
		RREG32(hub->vm_l2_pro_fault_status);

	status = RREG32(hub->vm_l2_pro_fault_status);
	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
	/* Write bit 0 to clear/re-arm the fault status capture. */
	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);


	dev_err(adev->dev,
		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
		status);
	if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
			gfxhub_client_ids[cid],
			cid);
	} else {
		/* MMHUB client IDs are per-ASIC; unknown ASICs/IDs print
		 * "unknown".
		 */
		switch (adev->asic_type) {
		case CHIP_VEGA10:
			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
			break;
		case CHIP_VEGA12:
			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
			break;
		case CHIP_VEGA20:
			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
			break;
		case CHIP_ARCTURUS:
			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
			break;
		case CHIP_RAVEN:
			mmhub_cid = mmhub_client_ids_raven[cid][rw];
			break;
		case CHIP_RENOIR:
			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
			break;
		default:
			mmhub_cid = NULL;
			break;
		}
		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
			mmhub_cid ? mmhub_cid : "unknown", cid);
	}
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
	return 0;
}
595e60f8db5SAlex Xie 
/* IRQ source callbacks for VM protection faults. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
600e60f8db5SAlex Xie 
601791c4769Sxinhui pan 
/* IRQ source callbacks for UMC ECC errors. */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};
606791c4769Sxinhui pan 
607e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
608e60f8db5SAlex Xie {
609770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
610770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
611791c4769Sxinhui pan 
6122ee9403eSZhigang Luo 	if (!amdgpu_sriov_vf(adev)) {
613791c4769Sxinhui pan 		adev->gmc.ecc_irq.num_types = 1;
614791c4769Sxinhui pan 		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
615e60f8db5SAlex Xie 	}
6162ee9403eSZhigang Luo }
617e60f8db5SAlex Xie 
6182a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
6192a79d868SYong Zhao 					uint32_t flush_type)
62003f89febSChristian König {
62103f89febSChristian König 	u32 req = 0;
62203f89febSChristian König 
62303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
624c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
6252a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
62603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
62703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
62803f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
62903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
63003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
63103f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
63203f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
63303f89febSChristian König 
63403f89febSChristian König 	return req;
63503f89febSChristian König }
63603f89febSChristian König 
63790f6452cSchangzhu /**
63890f6452cSchangzhu  * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
63990f6452cSchangzhu  *
64090f6452cSchangzhu  * @adev: amdgpu_device pointer
64190f6452cSchangzhu  * @vmhub: vmhub type
64290f6452cSchangzhu  *
64390f6452cSchangzhu  */
64490f6452cSchangzhu static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
64590f6452cSchangzhu 				       uint32_t vmhub)
64690f6452cSchangzhu {
64790f6452cSchangzhu 	return ((vmhub == AMDGPU_MMHUB_0 ||
64890f6452cSchangzhu 		 vmhub == AMDGPU_MMHUB_1) &&
64990f6452cSchangzhu 		(!amdgpu_sriov_vf(adev)) &&
65054f78a76SAlex Deucher 		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
65154f78a76SAlex Deucher 		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
65290f6452cSchangzhu }
65390f6452cSchangzhu 
654ea930000SAlex Sierra static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
655ea930000SAlex Sierra 					uint8_t vmid, uint16_t *p_pasid)
656ea930000SAlex Sierra {
657ea930000SAlex Sierra 	uint32_t value;
658ea930000SAlex Sierra 
659ea930000SAlex Sierra 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
660ea930000SAlex Sierra 		     + vmid);
661ea930000SAlex Sierra 	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
662ea930000SAlex Sierra 
663ea930000SAlex Sierra 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
664ea930000SAlex Sierra }
665ea930000SAlex Sierra 
666e60f8db5SAlex Xie /*
667e60f8db5SAlex Xie  * GART
668e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
669e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
670e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
671e60f8db5SAlex Xie  */
672e60f8db5SAlex Xie 
673e60f8db5SAlex Xie /**
6742a79d868SYong Zhao  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
675e60f8db5SAlex Xie  *
676e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
677e60f8db5SAlex Xie  * @vmid: vm instance to flush
678bf0df09cSLee Jones  * @vmhub: which hub to flush
6792a79d868SYong Zhao  * @flush_type: the flush type
680e60f8db5SAlex Xie  *
6812a79d868SYong Zhao  * Flush the TLB for the requested page table using certain type.
682e60f8db5SAlex Xie  */
6833ff98548SOak Zeng static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
6843ff98548SOak Zeng 					uint32_t vmhub, uint32_t flush_type)
685e60f8db5SAlex Xie {
68690f6452cSchangzhu 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
687e60f8db5SAlex Xie 	const unsigned eng = 17;
688b80cd524SFelix Kuehling 	u32 j, inv_req, inv_req2, tmp;
6893ff98548SOak Zeng 	struct amdgpu_vmhub *hub;
690e60f8db5SAlex Xie 
6913ff98548SOak Zeng 	BUG_ON(vmhub >= adev->num_vmhubs);
6923ff98548SOak Zeng 
6933ff98548SOak Zeng 	hub = &adev->vmhub[vmhub];
694b80cd524SFelix Kuehling 	if (adev->gmc.xgmi.num_physical_nodes &&
695b80cd524SFelix Kuehling 	    adev->asic_type == CHIP_VEGA20) {
696b80cd524SFelix Kuehling 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
697b80cd524SFelix Kuehling 		 * heavy-weight TLB flush (type 2), which flushes
698b80cd524SFelix Kuehling 		 * both. Due to a race condition with concurrent
699b80cd524SFelix Kuehling 		 * memory accesses using the same TLB cache line, we
700b80cd524SFelix Kuehling 		 * still need a second TLB flush after this.
701b80cd524SFelix Kuehling 		 */
702b80cd524SFelix Kuehling 		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
703b80cd524SFelix Kuehling 		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
704b80cd524SFelix Kuehling 	} else {
70537c58ddfSFelix Kuehling 		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
706b80cd524SFelix Kuehling 		inv_req2 = 0;
707b80cd524SFelix Kuehling 	}
708e60f8db5SAlex Xie 
70982d1a1b1SChengming Gui 	/* This is necessary for a HW workaround under SRIOV as well
71082d1a1b1SChengming Gui 	 * as GFXOFF under bare metal
71182d1a1b1SChengming Gui 	 */
71282d1a1b1SChengming Gui 	if (adev->gfx.kiq.ring.sched.ready &&
71382d1a1b1SChengming Gui 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
71481202807SDennis Li 	    down_read_trylock(&adev->reset_sem)) {
715148f597dSHuang Rui 		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
716148f597dSHuang Rui 		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
717af5fe1e9SChristian König 
71837c58ddfSFelix Kuehling 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
719af5fe1e9SChristian König 						   1 << vmid);
72081202807SDennis Li 		up_read(&adev->reset_sem);
7213ff98548SOak Zeng 		return;
722fc0faf04SEmily Deng 	}
7233890d111SEmily Deng 
7243890d111SEmily Deng 	spin_lock(&adev->gmc.invalidate_lock);
725f920d1bbSchangzhu 
726f920d1bbSchangzhu 	/*
727f920d1bbSchangzhu 	 * It may lose gpuvm invalidate acknowldege state across power-gating
728f920d1bbSchangzhu 	 * off cycle, add semaphore acquire before invalidation and semaphore
729f920d1bbSchangzhu 	 * release after invalidation to avoid entering power gated state
730f920d1bbSchangzhu 	 * to WA the Issue
731f920d1bbSchangzhu 	 */
732f920d1bbSchangzhu 
733f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
73490f6452cSchangzhu 	if (use_semaphore) {
735f920d1bbSchangzhu 		for (j = 0; j < adev->usec_timeout; j++) {
736f920d1bbSchangzhu 			/* a read return value of 1 means semaphore acuqire */
737148f597dSHuang Rui 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
738148f597dSHuang Rui 					    hub->eng_distance * eng);
739f920d1bbSchangzhu 			if (tmp & 0x1)
740f920d1bbSchangzhu 				break;
741f920d1bbSchangzhu 			udelay(1);
742f920d1bbSchangzhu 		}
743f920d1bbSchangzhu 
744f920d1bbSchangzhu 		if (j >= adev->usec_timeout)
745f920d1bbSchangzhu 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
746f920d1bbSchangzhu 	}
747f920d1bbSchangzhu 
748b80cd524SFelix Kuehling 	do {
749148f597dSHuang Rui 		WREG32_NO_KIQ(hub->vm_inv_eng0_req +
750148f597dSHuang Rui 			      hub->eng_distance * eng, inv_req);
75153499173SXiaojie Yuan 
75253499173SXiaojie Yuan 		/*
753b80cd524SFelix Kuehling 		 * Issue a dummy read to wait for the ACK register to
754b80cd524SFelix Kuehling 		 * be cleared to avoid a false ACK due to the new fast
755b80cd524SFelix Kuehling 		 * GRBM interface.
75653499173SXiaojie Yuan 		 */
75753499173SXiaojie Yuan 		if (vmhub == AMDGPU_GFXHUB_0)
758148f597dSHuang Rui 			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
759148f597dSHuang Rui 				      hub->eng_distance * eng);
76053499173SXiaojie Yuan 
761e60f8db5SAlex Xie 		for (j = 0; j < adev->usec_timeout; j++) {
762148f597dSHuang Rui 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
763148f597dSHuang Rui 					    hub->eng_distance * eng);
764396557b0SChristian König 			if (tmp & (1 << vmid))
765e60f8db5SAlex Xie 				break;
766e60f8db5SAlex Xie 			udelay(1);
767e60f8db5SAlex Xie 		}
768f920d1bbSchangzhu 
769b80cd524SFelix Kuehling 		inv_req = inv_req2;
770b80cd524SFelix Kuehling 		inv_req2 = 0;
771b80cd524SFelix Kuehling 	} while (inv_req);
772b80cd524SFelix Kuehling 
773f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
77490f6452cSchangzhu 	if (use_semaphore)
775f920d1bbSchangzhu 		/*
776f920d1bbSchangzhu 		 * add semaphore release after invalidation,
777f920d1bbSchangzhu 		 * write with 0 means semaphore release
778f920d1bbSchangzhu 		 */
779148f597dSHuang Rui 		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
780148f597dSHuang Rui 			      hub->eng_distance * eng, 0);
781f920d1bbSchangzhu 
7823890d111SEmily Deng 	spin_unlock(&adev->gmc.invalidate_lock);
783f920d1bbSchangzhu 
784396557b0SChristian König 	if (j < adev->usec_timeout)
7853ff98548SOak Zeng 		return;
786396557b0SChristian König 
787e60f8db5SAlex Xie 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
788e60f8db5SAlex Xie }
789e60f8db5SAlex Xie 
790ea930000SAlex Sierra /**
791ea930000SAlex Sierra  * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
792ea930000SAlex Sierra  *
793ea930000SAlex Sierra  * @adev: amdgpu_device pointer
794ea930000SAlex Sierra  * @pasid: pasid to be flush
795bf0df09cSLee Jones  * @flush_type: the flush type
796bf0df09cSLee Jones  * @all_hub: flush all hubs
797ea930000SAlex Sierra  *
798ea930000SAlex Sierra  * Flush the TLB for the requested pasid.
799ea930000SAlex Sierra  */
800ea930000SAlex Sierra static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
801ea930000SAlex Sierra 					uint16_t pasid, uint32_t flush_type,
802ea930000SAlex Sierra 					bool all_hub)
803ea930000SAlex Sierra {
804ea930000SAlex Sierra 	int vmid, i;
805ea930000SAlex Sierra 	signed long r;
806ea930000SAlex Sierra 	uint32_t seq;
807ea930000SAlex Sierra 	uint16_t queried_pasid;
808ea930000SAlex Sierra 	bool ret;
809ea930000SAlex Sierra 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
810ea930000SAlex Sierra 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
811ea930000SAlex Sierra 
81253b3f8f4SDennis Li 	if (amdgpu_in_reset(adev))
813ea930000SAlex Sierra 		return -EIO;
814ea930000SAlex Sierra 
81581202807SDennis Li 	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
816b80cd524SFelix Kuehling 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
817b80cd524SFelix Kuehling 		 * heavy-weight TLB flush (type 2), which flushes
818b80cd524SFelix Kuehling 		 * both. Due to a race condition with concurrent
819b80cd524SFelix Kuehling 		 * memory accesses using the same TLB cache line, we
820b80cd524SFelix Kuehling 		 * still need a second TLB flush after this.
821b80cd524SFelix Kuehling 		 */
822b80cd524SFelix Kuehling 		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
823b80cd524SFelix Kuehling 				       adev->asic_type == CHIP_VEGA20);
824b80cd524SFelix Kuehling 		/* 2 dwords flush + 8 dwords fence */
825b80cd524SFelix Kuehling 		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
826b80cd524SFelix Kuehling 
827b80cd524SFelix Kuehling 		if (vega20_xgmi_wa)
828b80cd524SFelix Kuehling 			ndw += kiq->pmf->invalidate_tlbs_size;
829b80cd524SFelix Kuehling 
830ea930000SAlex Sierra 		spin_lock(&adev->gfx.kiq.ring_lock);
83136a1707aSAlex Sierra 		/* 2 dwords flush + 8 dwords fence */
832b80cd524SFelix Kuehling 		amdgpu_ring_alloc(ring, ndw);
833b80cd524SFelix Kuehling 		if (vega20_xgmi_wa)
834b80cd524SFelix Kuehling 			kiq->pmf->kiq_invalidate_tlbs(ring,
835b80cd524SFelix Kuehling 						      pasid, 2, all_hub);
836ea930000SAlex Sierra 		kiq->pmf->kiq_invalidate_tlbs(ring,
837ea930000SAlex Sierra 					pasid, flush_type, all_hub);
83804e4e2e9SYintian Tao 		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
83904e4e2e9SYintian Tao 		if (r) {
84004e4e2e9SYintian Tao 			amdgpu_ring_undo(ring);
841abb17b1eSColin Ian King 			spin_unlock(&adev->gfx.kiq.ring_lock);
84281202807SDennis Li 			up_read(&adev->reset_sem);
84304e4e2e9SYintian Tao 			return -ETIME;
84404e4e2e9SYintian Tao 		}
84504e4e2e9SYintian Tao 
846ea930000SAlex Sierra 		amdgpu_ring_commit(ring);
847ea930000SAlex Sierra 		spin_unlock(&adev->gfx.kiq.ring_lock);
848ea930000SAlex Sierra 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
849ea930000SAlex Sierra 		if (r < 1) {
850aac89168SDennis Li 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
85181202807SDennis Li 			up_read(&adev->reset_sem);
852ea930000SAlex Sierra 			return -ETIME;
853ea930000SAlex Sierra 		}
85481202807SDennis Li 		up_read(&adev->reset_sem);
855ea930000SAlex Sierra 		return 0;
856ea930000SAlex Sierra 	}
857ea930000SAlex Sierra 
858ea930000SAlex Sierra 	for (vmid = 1; vmid < 16; vmid++) {
859ea930000SAlex Sierra 
860ea930000SAlex Sierra 		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
861ea930000SAlex Sierra 				&queried_pasid);
862ea930000SAlex Sierra 		if (ret && queried_pasid == pasid) {
863ea930000SAlex Sierra 			if (all_hub) {
864ea930000SAlex Sierra 				for (i = 0; i < adev->num_vmhubs; i++)
865ea930000SAlex Sierra 					gmc_v9_0_flush_gpu_tlb(adev, vmid,
866fa34edbeSFelix Kuehling 							i, flush_type);
867ea930000SAlex Sierra 			} else {
868ea930000SAlex Sierra 				gmc_v9_0_flush_gpu_tlb(adev, vmid,
869fa34edbeSFelix Kuehling 						AMDGPU_GFXHUB_0, flush_type);
870ea930000SAlex Sierra 			}
871ea930000SAlex Sierra 			break;
872ea930000SAlex Sierra 		}
873ea930000SAlex Sierra 	}
874ea930000SAlex Sierra 
875ea930000SAlex Sierra 	return 0;
876ea930000SAlex Sierra 
877ea930000SAlex Sierra }
878ea930000SAlex Sierra 
8799096d6e5SChristian König static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
880c633c00bSChristian König 					    unsigned vmid, uint64_t pd_addr)
8819096d6e5SChristian König {
88290f6452cSchangzhu 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
883250b4228SChristian König 	struct amdgpu_device *adev = ring->adev;
884250b4228SChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
8852a79d868SYong Zhao 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
8869096d6e5SChristian König 	unsigned eng = ring->vm_inv_eng;
8879096d6e5SChristian König 
888f920d1bbSchangzhu 	/*
889f920d1bbSchangzhu 	 * It may lose gpuvm invalidate acknowldege state across power-gating
890f920d1bbSchangzhu 	 * off cycle, add semaphore acquire before invalidation and semaphore
891f920d1bbSchangzhu 	 * release after invalidation to avoid entering power gated state
892f920d1bbSchangzhu 	 * to WA the Issue
893f920d1bbSchangzhu 	 */
894f920d1bbSchangzhu 
895f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
89690f6452cSchangzhu 	if (use_semaphore)
897f920d1bbSchangzhu 		/* a read return value of 1 means semaphore acuqire */
898f920d1bbSchangzhu 		amdgpu_ring_emit_reg_wait(ring,
899148f597dSHuang Rui 					  hub->vm_inv_eng0_sem +
900148f597dSHuang Rui 					  hub->eng_distance * eng, 0x1, 0x1);
901f920d1bbSchangzhu 
902148f597dSHuang Rui 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
903148f597dSHuang Rui 			      (hub->ctx_addr_distance * vmid),
9049096d6e5SChristian König 			      lower_32_bits(pd_addr));
9059096d6e5SChristian König 
906148f597dSHuang Rui 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
907148f597dSHuang Rui 			      (hub->ctx_addr_distance * vmid),
9089096d6e5SChristian König 			      upper_32_bits(pd_addr));
9099096d6e5SChristian König 
910148f597dSHuang Rui 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
911148f597dSHuang Rui 					    hub->eng_distance * eng,
912148f597dSHuang Rui 					    hub->vm_inv_eng0_ack +
913148f597dSHuang Rui 					    hub->eng_distance * eng,
914f8bc9037SAlex Deucher 					    req, 1 << vmid);
915f732b6b3SChristian König 
916f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
91790f6452cSchangzhu 	if (use_semaphore)
918f920d1bbSchangzhu 		/*
919f920d1bbSchangzhu 		 * add semaphore release after invalidation,
920f920d1bbSchangzhu 		 * write with 0 means semaphore release
921f920d1bbSchangzhu 		 */
922148f597dSHuang Rui 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
923148f597dSHuang Rui 				      hub->eng_distance * eng, 0);
924f920d1bbSchangzhu 
9259096d6e5SChristian König 	return pd_addr;
9269096d6e5SChristian König }
9279096d6e5SChristian König 
928c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
929c633c00bSChristian König 					unsigned pasid)
930c633c00bSChristian König {
931c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
932c633c00bSChristian König 	uint32_t reg;
933c633c00bSChristian König 
934f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
935f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
936f2d66571SLe Ma 		return;
937f2d66571SLe Ma 
938a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
939c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
940c633c00bSChristian König 	else
941c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
942c633c00bSChristian König 
943c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
944c633c00bSChristian König }
945c633c00bSChristian König 
946e60f8db5SAlex Xie /*
947e60f8db5SAlex Xie  * PTE format on VEGA 10:
948e60f8db5SAlex Xie  * 63:59 reserved
949e60f8db5SAlex Xie  * 58:57 mtype
950e60f8db5SAlex Xie  * 56 F
951e60f8db5SAlex Xie  * 55 L
952e60f8db5SAlex Xie  * 54 P
953e60f8db5SAlex Xie  * 53 SW
954e60f8db5SAlex Xie  * 52 T
955e60f8db5SAlex Xie  * 50:48 reserved
956e60f8db5SAlex Xie  * 47:12 4k physical page base address
957e60f8db5SAlex Xie  * 11:7 fragment
958e60f8db5SAlex Xie  * 6 write
959e60f8db5SAlex Xie  * 5 read
960e60f8db5SAlex Xie  * 4 exe
961e60f8db5SAlex Xie  * 3 Z
962e60f8db5SAlex Xie  * 2 snooped
963e60f8db5SAlex Xie  * 1 system
964e60f8db5SAlex Xie  * 0 valid
965e60f8db5SAlex Xie  *
966e60f8db5SAlex Xie  * PDE format on VEGA 10:
967e60f8db5SAlex Xie  * 63:59 block fragment size
968e60f8db5SAlex Xie  * 58:55 reserved
969e60f8db5SAlex Xie  * 54 P
970e60f8db5SAlex Xie  * 53:48 reserved
971e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
972e60f8db5SAlex Xie  * 5:3 reserved
973e60f8db5SAlex Xie  * 2 C
974e60f8db5SAlex Xie  * 1 system
975e60f8db5SAlex Xie  * 0 valid
976e60f8db5SAlex Xie  */
977e60f8db5SAlex Xie 
97871776b6dSChristian König static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
979e60f8db5SAlex Xie 
980e60f8db5SAlex Xie {
98171776b6dSChristian König 	switch (flags) {
982e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
98371776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
984e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
98571776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
986e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
98771776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
988093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
98971776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
990e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
99171776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
992e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
99371776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
994e60f8db5SAlex Xie 	default:
99571776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
996e60f8db5SAlex Xie 	}
997e60f8db5SAlex Xie }
998e60f8db5SAlex Xie 
9993de676d8SChristian König static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
10003de676d8SChristian König 				uint64_t *addr, uint64_t *flags)
1001f75e237cSChristian König {
1002bbc9fb10SChristian König 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
10033de676d8SChristian König 		*addr = adev->vm_manager.vram_base_offset + *addr -
1004770d13b1SChristian König 			adev->gmc.vram_start;
10053de676d8SChristian König 	BUG_ON(*addr & 0xFFFF00000000003FULL);
10066a42fd6fSChristian König 
1007770d13b1SChristian König 	if (!adev->gmc.translate_further)
10086a42fd6fSChristian König 		return;
10096a42fd6fSChristian König 
10106a42fd6fSChristian König 	if (level == AMDGPU_VM_PDB1) {
10116a42fd6fSChristian König 		/* Set the block fragment size */
10126a42fd6fSChristian König 		if (!(*flags & AMDGPU_PDE_PTE))
10136a42fd6fSChristian König 			*flags |= AMDGPU_PDE_BFS(0x9);
10146a42fd6fSChristian König 
10156a42fd6fSChristian König 	} else if (level == AMDGPU_VM_PDB0) {
10166a42fd6fSChristian König 		if (*flags & AMDGPU_PDE_PTE)
10176a42fd6fSChristian König 			*flags &= ~AMDGPU_PDE_PTE;
10186a42fd6fSChristian König 		else
10196a42fd6fSChristian König 			*flags |= AMDGPU_PTE_TF;
10206a42fd6fSChristian König 	}
1021f75e237cSChristian König }
1022f75e237cSChristian König 
1023cbfae36cSChristian König static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1024cbfae36cSChristian König 				struct amdgpu_bo_va_mapping *mapping,
1025cbfae36cSChristian König 				uint64_t *flags)
1026cbfae36cSChristian König {
1027cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
1028cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1029cbfae36cSChristian König 
1030cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1031cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1032cbfae36cSChristian König 
1033cbfae36cSChristian König 	if (mapping->flags & AMDGPU_PTE_PRT) {
1034cbfae36cSChristian König 		*flags |= AMDGPU_PTE_PRT;
1035cbfae36cSChristian König 		*flags &= ~AMDGPU_PTE_VALID;
1036cbfae36cSChristian König 	}
1037cbfae36cSChristian König 
10387ffe7238SYong Zhao 	if ((adev->asic_type == CHIP_ARCTURUS ||
10397ffe7238SYong Zhao 	    adev->asic_type == CHIP_ALDEBARAN) &&
1040cbfae36cSChristian König 	    !(*flags & AMDGPU_PTE_SYSTEM) &&
1041cbfae36cSChristian König 	    mapping->bo_va->is_xgmi)
1042cbfae36cSChristian König 		*flags |= AMDGPU_PTE_SNOOPED;
104372b4db0fSEric Huang 
104472b4db0fSEric Huang 	if (adev->asic_type == CHIP_ALDEBARAN)
104572b4db0fSEric Huang 		*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
1046cbfae36cSChristian König }
1047cbfae36cSChristian König 
10487b885f0eSAlex Deucher static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
10497b885f0eSAlex Deucher {
10507b885f0eSAlex Deucher 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
10517b885f0eSAlex Deucher 	unsigned size;
10527b885f0eSAlex Deucher 
10537b885f0eSAlex Deucher 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
10547b885f0eSAlex Deucher 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
10557b885f0eSAlex Deucher 	} else {
10567b885f0eSAlex Deucher 		u32 viewport;
10577b885f0eSAlex Deucher 
10587b885f0eSAlex Deucher 		switch (adev->asic_type) {
10597b885f0eSAlex Deucher 		case CHIP_RAVEN:
10607b885f0eSAlex Deucher 		case CHIP_RENOIR:
10617b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
10627b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport,
10637b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
10647b885f0eSAlex Deucher 				REG_GET_FIELD(viewport,
10657b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
10667b885f0eSAlex Deucher 				4);
10677b885f0eSAlex Deucher 			break;
10687b885f0eSAlex Deucher 		case CHIP_VEGA10:
10697b885f0eSAlex Deucher 		case CHIP_VEGA12:
10707b885f0eSAlex Deucher 		case CHIP_VEGA20:
10717b885f0eSAlex Deucher 		default:
10727b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
10737b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
10747b885f0eSAlex Deucher 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
10757b885f0eSAlex Deucher 				4);
10767b885f0eSAlex Deucher 			break;
10777b885f0eSAlex Deucher 		}
10787b885f0eSAlex Deucher 	}
10797b885f0eSAlex Deucher 
10807b885f0eSAlex Deucher 	return size;
10817b885f0eSAlex Deucher }
10827b885f0eSAlex Deucher 
1083132f34e4SChristian König static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1084132f34e4SChristian König 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1085ea930000SAlex Sierra 	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
10869096d6e5SChristian König 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1087c633c00bSChristian König 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
108871776b6dSChristian König 	.map_mtype = gmc_v9_0_map_mtype,
1089cbfae36cSChristian König 	.get_vm_pde = gmc_v9_0_get_vm_pde,
10907b885f0eSAlex Deucher 	.get_vm_pte = gmc_v9_0_get_vm_pte,
10917b885f0eSAlex Deucher 	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1092e60f8db5SAlex Xie };
1093e60f8db5SAlex Xie 
1094132f34e4SChristian König static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1095e60f8db5SAlex Xie {
1096132f34e4SChristian König 	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1097e60f8db5SAlex Xie }
1098e60f8db5SAlex Xie 
10995b6b35aaSHawking Zhang static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
11005b6b35aaSHawking Zhang {
11015b6b35aaSHawking Zhang 	switch (adev->asic_type) {
1102e7da754bSMonk Liu 	case CHIP_VEGA10:
1103e7da754bSMonk Liu 		adev->umc.funcs = &umc_v6_0_funcs;
1104e7da754bSMonk Liu 		break;
11055b6b35aaSHawking Zhang 	case CHIP_VEGA20:
11063aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
11073aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
11083aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
11094cf781c2SJohn Clements 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
11104cf781c2SJohn Clements 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
11114cf781c2SJohn Clements 		adev->umc.funcs = &umc_v6_1_funcs;
11124cf781c2SJohn Clements 		break;
11139e612c11SHawking Zhang 	case CHIP_ARCTURUS:
11143aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
11153aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
11163aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
11174cf781c2SJohn Clements 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
11183aacf4eaSTao Zhou 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1119045c0216STao Zhou 		adev->umc.funcs = &umc_v6_1_funcs;
11205b6b35aaSHawking Zhang 		break;
11215b6b35aaSHawking Zhang 	default:
11225b6b35aaSHawking Zhang 		break;
11235b6b35aaSHawking Zhang 	}
11245b6b35aaSHawking Zhang }
11255b6b35aaSHawking Zhang 
11263d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
11273d093da0STao Zhou {
11283d093da0STao Zhou 	switch (adev->asic_type) {
1129f6c3623bSDennis Li 	case CHIP_ARCTURUS:
1130f6c3623bSDennis Li 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
1131f6c3623bSDennis Li 		break;
11324da999cdSOak Zeng 	case CHIP_ALDEBARAN:
11334da999cdSOak Zeng 		adev->mmhub.funcs = &mmhub_v1_7_funcs;
11344da999cdSOak Zeng 		break;
11353d093da0STao Zhou 	default:
11369fb1506eSOak Zeng 		adev->mmhub.funcs = &mmhub_v1_0_funcs;
11373d093da0STao Zhou 		break;
11383d093da0STao Zhou 	}
11393d093da0STao Zhou }
11403d093da0STao Zhou 
11418ffff9b4SOak Zeng static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
11428ffff9b4SOak Zeng {
11438ffff9b4SOak Zeng 	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
11448ffff9b4SOak Zeng }
11458ffff9b4SOak Zeng 
1146e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
1147e60f8db5SAlex Xie {
1148e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1149e60f8db5SAlex Xie 
1150132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
1151e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
11525b6b35aaSHawking Zhang 	gmc_v9_0_set_umc_funcs(adev);
11533d093da0STao Zhou 	gmc_v9_0_set_mmhub_funcs(adev);
11548ffff9b4SOak Zeng 	gmc_v9_0_set_gfxhub_funcs(adev);
1155e60f8db5SAlex Xie 
115631691b8dSRajneesh Bhardwaj 	if (adev->asic_type == CHIP_VEGA20 ||
115731691b8dSRajneesh Bhardwaj 	    adev->asic_type == CHIP_ARCTURUS)
115831691b8dSRajneesh Bhardwaj 		adev->gmc.xgmi.supported = true;
115931691b8dSRajneesh Bhardwaj 
116031691b8dSRajneesh Bhardwaj 	if (adev->asic_type == CHIP_ALDEBARAN) {
116131691b8dSRajneesh Bhardwaj 		adev->gmc.xgmi.supported = true;
116231691b8dSRajneesh Bhardwaj 		adev->gmc.xgmi.connected_to_cpu =
116331691b8dSRajneesh Bhardwaj 			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
116431691b8dSRajneesh Bhardwaj         }
116531691b8dSRajneesh Bhardwaj 
1166770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1167770d13b1SChristian König 	adev->gmc.shared_aperture_end =
1168770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1169bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1170770d13b1SChristian König 	adev->gmc.private_aperture_end =
1171770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1172a7ea6548SAlex Deucher 
1173e60f8db5SAlex Xie 	return 0;
1174e60f8db5SAlex Xie }
1175e60f8db5SAlex Xie 
1176e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
1177e60f8db5SAlex Xie {
1178e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1179c5b6e585STao Zhou 	int r;
11804789c463SChristian König 
1181bdbe90f0SAlex Deucher 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1182c713a461SEvan Quan 	if (r)
1183c713a461SEvan Quan 		return r;
11844a20300bSGuchun Chen 
11854a20300bSGuchun Chen 	/*
11864a20300bSGuchun Chen 	 * Workaround performance drop issue with VBIOS enables partial
11874a20300bSGuchun Chen 	 * writes, while disables HBM ECC for vega10.
11884a20300bSGuchun Chen 	 */
118988474ccaSGuchun Chen 	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
11904a20300bSGuchun Chen 		if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1191bdf84a80SJoseph Greathouse 			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
1192bdf84a80SJoseph Greathouse 				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
11934a20300bSGuchun Chen 		}
1194f49ea9f8SHawking Zhang 	}
119502bab923SDavid Panariti 
1196fe5211f1SHawking Zhang 	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
1197fe5211f1SHawking Zhang 		adev->mmhub.funcs->reset_ras_error_count(adev);
1198fe5211f1SHawking Zhang 
1199ba083492STao Zhou 	r = amdgpu_gmc_ras_late_init(adev);
1200791c4769Sxinhui pan 	if (r)
1201e60f8db5SAlex Xie 		return r;
1202e60f8db5SAlex Xie 
1203770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1204e60f8db5SAlex Xie }
1205e60f8db5SAlex Xie 
1206e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1207770d13b1SChristian König 					struct amdgpu_gmc *mc)
1208e60f8db5SAlex Xie {
1209e60f8db5SAlex Xie 	u64 base = 0;
12109d4f837aSFrank.Min 
12119fb1506eSOak Zeng 	if (!amdgpu_sriov_vf(adev))
12129fb1506eSOak Zeng 		base = adev->mmhub.funcs->get_fb_location(adev);
12139d4f837aSFrank.Min 
12146fdd68b1SAlex Deucher 	/* add the xgmi offset of the physical node */
12156fdd68b1SAlex Deucher 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
121683afe835SOak Zeng 	amdgpu_gmc_vram_location(adev, mc, base);
1217961c75cfSChristian König 	amdgpu_gmc_gart_location(adev, mc);
1218c3e1b43cSChristian König 	amdgpu_gmc_agp_location(adev, mc);
1219e60f8db5SAlex Xie 	/* base offset of vram pages */
12208ffff9b4SOak Zeng 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
12216fdd68b1SAlex Deucher 
12226fdd68b1SAlex Deucher 	/* XXX: add the xgmi offset of the physical node? */
12236fdd68b1SAlex Deucher 	adev->vm_manager.vram_base_offset +=
12246fdd68b1SAlex Deucher 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1225e60f8db5SAlex Xie }
1226e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* dGPU only: try to resize the fb BAR before sampling its
	 * base/size below, so the aperture reflects the new layout.
	 */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
	 * interface can use VRAM through here as it appears system reserved
	 * memory in host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((adev->flags & AMD_IS_APU) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		/* Override the PCI BAR aperture with the direct host view of
		 * VRAM: gfxhub fb offset plus this node's xgmi segment.
		 */
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		/* No explicit size requested; pick a per-ASIC default. */
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		case CHIP_ALDEBARAN:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		/* User-requested size in MB, converted to bytes. */
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
1306e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_init - allocate and set up the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the common gart structure, sizes the page table and
 * allocates it in VRAM.  Returns 0 on success, negative error code
 * on failure.
 */
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	/* 8 bytes per GART page table entry */
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	/* GART pages are mapped uncached (MTYPE_UC) and executable. */
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;
	return amdgpu_gart_table_vram_alloc(adev);
}
1324e60f8db5SAlex Xie 
/**
 * gmc_v9_0_save_registers - saves regs
 *
 * @adev: amdgpu_device pointer
 *
 * This saves potential register values that should be
 * restored upon resume
 */
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
	/* Only Raven needs DCHUBBUB_SDPIF_MMIO_CNTRL_0 preserved; it is
	 * written back in gmc_v9_0_restore_registers().
	 */
	if (adev->asic_type == CHIP_RAVEN)
		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
1338ebdef28eSAlex Deucher 
/**
 * gmc_v9_0_sw_init - software init for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 *
 * Determines VRAM parameters, sizes the VM address space per ASIC,
 * registers fault/ECC interrupt sources, sets the DMA mask, and
 * initializes the memory manager, GART and VM manager.
 * Returns 0 on success, negative error code on failure.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	/* NOTE(review): r is not checked here; on failure the fallback
	 * width computation below is relied upon — confirm intentional.
	 */
	r = amdgpu_atomfirmware_get_vram_info(adev,
		&vram_width, &vram_type, &vram_vendor);
	if (amdgpu_sriov_vf(adev))
		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
		 * and DF related registers is not readable, seems hardcord is the
		 * only way to set the correct vram_width
		 */
		adev->gmc.vram_width = 2048;
	else if (amdgpu_emu_mode != 1)
		adev->gmc.vram_width = vram_width;

	if (!adev->gmc.vram_width) {
		/* Fall back to channel count * channel width. */
		int chansize, numchan;

		/* hbm memory channel size */
		if (adev->flags & AMD_IS_APU)
			chansize = 64;
		else
			chansize = 128;

		numchan = adev->df.funcs->get_hbm_channel_number(adev);
		adev->gmc.vram_width = numchan * chansize;
	}

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	/* Per-ASIC VM hub count and virtual address space sizing. */
	switch (adev->asic_type) {
	case CHIP_RAVEN:
		adev->num_vmhubs = 2;

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RENOIR:
	case CHIP_ALDEBARAN:
		adev->num_vmhubs = 2;


		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	case CHIP_ARCTURUS:
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* Arcturus has a second VMC for its extra vmhub. */
	if (adev->asic_type == CHIP_ARCTURUS) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev)) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(44);

	if (adev->gmc.xgmi.supported) {
		r = adev->gfxhub.funcs->get_xgmi_info(adev);
		if (r)
			return r;
	}

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		adev->asic_type == CHIP_ARCTURUS ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	gmc_v9_0_save_registers(adev);

	return 0;
}
1498e60f8db5SAlex Xie 
/**
 * gmc_v9_0_sw_fini - software teardown for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 *
 * Tears down RAS, GEM objects, the VM manager, the GART table and the
 * buffer manager — the reverse of gmc_v9_0_sw_init().  Always returns 0.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1512e60f8db5SAlex Xie 
/**
 * gmc_v9_0_init_golden_registers - program per-ASIC golden register settings
 *
 * @adev: amdgpu_device pointer
 *
 * Applies the mmhub/athub golden register sequences required by each
 * ASIC.  Vega12 intentionally needs none; under SR-IOV the Vega10
 * sequences are skipped (host programs them).
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1541e60f8db5SAlex Xie 
/**
 * gmc_v9_0_restore_registers - restores regs
 *
 * @adev: amdgpu_device pointer
 *
 * This restores register values, saved at suspend.
 */
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
	/* Counterpart of gmc_v9_0_save_registers(); Raven only. */
	if (adev->asic_type == CHIP_RAVEN) {
		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
		/* Read back to verify the write actually stuck. */
		WARN_ON(adev->gmc.sdpif_register !=
			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
	}
}
1557c2ecd79bSShirish S 
/**
 * gmc_v9_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * Pins the GART table in VRAM and enables GART translation on both the
 * gfxhub and the mmhub.  Returns 0 on success, negative error code on
 * failure.
 */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	/* The table BO must stay resident while GART is active. */
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = adev->gfxhub.funcs->gart_enable(adev);
	if (r)
		return r;

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
	adev->gart.ready = true;
	return 0;
}
1589cb1545f7SOak Zeng 
/**
 * gmc_v9_0_hw_init - hardware init for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 *
 * Programs golden registers, disables VGA access, initializes HDP,
 * configures fault handling, flushes all VM hub TLBs, initializes UMC
 * registers and finally enables GART.  Returns 0 on success, negative
 * error code on failure.
 */
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP.*/
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/* value == true means faults get a default (dummy) page; false
	 * means the fault halts the client (AMDGPU_VM_FAULT_STOP_ALWAYS).
	 */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	/* Flush VMID 0 on every hub so stale translations are dropped. */
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
1633e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	/* Table no longer referenced by hardware; safe to unpin. */
	amdgpu_gart_table_vram_unpin(adev);
}
1647e60f8db5SAlex Xie 
/**
 * gmc_v9_0_hw_fini - hardware teardown for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 *
 * Releases the ECC and VM fault interrupts and disables GART.  Under
 * SR-IOV this is a no-op since the guest must not touch GMC registers.
 * Always returns 0.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1664e60f8db5SAlex Xie 
/**
 * gmc_v9_0_suspend - GMC v9 suspend callback
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 *
 * Suspending the GMC block is identical to tearing down its hardware
 * state, so simply forward to gmc_v9_0_hw_fini().
 */
static int gmc_v9_0_suspend(void *handle)
{
	return gmc_v9_0_hw_fini(handle);
}
1671e60f8db5SAlex Xie 
/**
 * gmc_v9_0_resume - GMC v9 resume callback
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 *
 * Re-runs hardware init and, on success, resets all VMIDs since any
 * previously assigned translations were lost across suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	int err;

	err = gmc_v9_0_hw_init(adev);
	if (err == 0)
		amdgpu_vmid_reset_all(adev);

	return err;
}
1685e60f8db5SAlex Xie 
/* amd_ip_funcs.is_idle callback; @handle is the amdgpu_device pointer. */
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}
1691e60f8db5SAlex Xie 
/* amd_ip_funcs.wait_for_idle callback; nothing to wait for on GMC v9. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}
1697e60f8db5SAlex Xie 
/* amd_ip_funcs.soft_reset callback; intentionally a stub for GMC v9. */
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
1703e60f8db5SAlex Xie 
/**
 * gmc_v9_0_set_clockgating_state - set GMC clockgating state
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 * @state: clockgating state to program
 *
 * Delegates to the mmhub callback and the athub v1.0 helper.
 * Always returns 0.
 */
static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}
1715e60f8db5SAlex Xie 
/**
 * gmc_v9_0_get_clockgating_state - report GMC clockgating state
 *
 * @handle: amdgpu_device pointer (as void *, per amd_ip_funcs)
 * @flags: out parameter; mmhub and athub OR their CG flags into it
 */
static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}
172413052be5SHuang Rui 
/* amd_ip_funcs.set_powergating_state callback; no PG control for GMC v9. */
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
1730e60f8db5SAlex Xie 
/* IP-block callback table wiring the GMC v9 handlers into the
 * common amdgpu IP lifecycle (init/fini, suspend/resume, CG/PG).
 */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1748e60f8db5SAlex Xie 
/* Version descriptor registered with the amdgpu IP-block framework. */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1757