1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
34cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
35135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
36135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
37fb960bd2SFeifei Xu #include "vega10_enum.h"
3865417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
39ea930000SAlex Sierra #include "athub/athub_1_0_sh_mask.h"
406ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
41250b4228SChristian König #include "oss/osssys_4_0_offset.h"
42e60f8db5SAlex Xie 
43946a4d5bSShaoyun Liu #include "soc15.h"
44ea930000SAlex Sierra #include "soc15d.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5251cce480SLe Ma #include "mmhub_v9_4.h"
5385e39550SLe Ma #include "mmhub_v1_7.h"
545b6b35aaSHawking Zhang #include "umc_v6_1.h"
55e7da754bSMonk Liu #include "umc_v6_0.h"
56186c8a85SJohn Clements #include "umc_v6_7.h"
576f12507fSHawking Zhang #include "hdp_v4_0.h"
583907c492SJohn Clements #include "mca_v3_0.h"
59e60f8db5SAlex Xie 
6044a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
6144a99b65SAndrey Grodzovsky 
62791c4769Sxinhui pan #include "amdgpu_ras.h"
63029fbd43SHawking Zhang #include "amdgpu_xgmi.h"
64791c4769Sxinhui pan 
65ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
66ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
67ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
68ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
69ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
70ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
71ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
72f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
73f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2
74f8646661SAlex Deucher 
75dc5d4affSHarry Wentland #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2                                                          0x05ea
76dc5d4affSHarry Wentland #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX                                                 2
77dc5d4affSHarry Wentland 
78ebdef28eSAlex Deucher 
79be99ecbfSAlex Deucher static const char *gfxhub_client_ids[] = {
80be99ecbfSAlex Deucher 	"CB",
81be99ecbfSAlex Deucher 	"DB",
82be99ecbfSAlex Deucher 	"IA",
83be99ecbfSAlex Deucher 	"WD",
84be99ecbfSAlex Deucher 	"CPF",
85be99ecbfSAlex Deucher 	"CPC",
86be99ecbfSAlex Deucher 	"CPG",
87be99ecbfSAlex Deucher 	"RLC",
88be99ecbfSAlex Deucher 	"TCP",
89be99ecbfSAlex Deucher 	"SQC (inst)",
90be99ecbfSAlex Deucher 	"SQC (data)",
91be99ecbfSAlex Deucher 	"SQG",
92be99ecbfSAlex Deucher 	"PA",
93be99ecbfSAlex Deucher };
94be99ecbfSAlex Deucher 
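/*
 * The mmhub_client_ids_* tables below map a faulting MMHUB client ID to a
 * name.  The second index is the RW field of VM_L2_PROTECTION_FAULT_STATUS
 * as used in gmc_v9_0_process_interrupt(); 0 appears to correspond to read
 * clients and 1 to write clients.
 */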
9502f23f5fSAlex Deucher static const char *mmhub_client_ids_raven[][2] = {
9602f23f5fSAlex Deucher 	[0][0] = "MP1",
9702f23f5fSAlex Deucher 	[1][0] = "MP0",
9802f23f5fSAlex Deucher 	[2][0] = "VCN",
9902f23f5fSAlex Deucher 	[3][0] = "VCNU",
10002f23f5fSAlex Deucher 	[4][0] = "HDP",
10102f23f5fSAlex Deucher 	[5][0] = "DCE",
10202f23f5fSAlex Deucher 	[13][0] = "UTCL2",
10302f23f5fSAlex Deucher 	[19][0] = "TLS",
10402f23f5fSAlex Deucher 	[26][0] = "OSS",
10502f23f5fSAlex Deucher 	[27][0] = "SDMA0",
10602f23f5fSAlex Deucher 	[0][1] = "MP1",
10702f23f5fSAlex Deucher 	[1][1] = "MP0",
10802f23f5fSAlex Deucher 	[2][1] = "VCN",
10902f23f5fSAlex Deucher 	[3][1] = "VCNU",
11002f23f5fSAlex Deucher 	[4][1] = "HDP",
11102f23f5fSAlex Deucher 	[5][1] = "XDP",
11202f23f5fSAlex Deucher 	[6][1] = "DBGU0",
11302f23f5fSAlex Deucher 	[7][1] = "DCE",
11402f23f5fSAlex Deucher 	[8][1] = "DCEDWB0",
11502f23f5fSAlex Deucher 	[9][1] = "DCEDWB1",
11602f23f5fSAlex Deucher 	[26][1] = "OSS",
11702f23f5fSAlex Deucher 	[27][1] = "SDMA0",
11802f23f5fSAlex Deucher };
11902f23f5fSAlex Deucher 
12002f23f5fSAlex Deucher static const char *mmhub_client_ids_renoir[][2] = {
12102f23f5fSAlex Deucher 	[0][0] = "MP1",
12202f23f5fSAlex Deucher 	[1][0] = "MP0",
12302f23f5fSAlex Deucher 	[2][0] = "HDP",
12402f23f5fSAlex Deucher 	[4][0] = "DCEDMC",
12502f23f5fSAlex Deucher 	[5][0] = "DCEVGA",
12602f23f5fSAlex Deucher 	[13][0] = "UTCL2",
12702f23f5fSAlex Deucher 	[19][0] = "TLS",
12802f23f5fSAlex Deucher 	[26][0] = "OSS",
12902f23f5fSAlex Deucher 	[27][0] = "SDMA0",
13002f23f5fSAlex Deucher 	[28][0] = "VCN",
13102f23f5fSAlex Deucher 	[29][0] = "VCNU",
13202f23f5fSAlex Deucher 	[30][0] = "JPEG",
13302f23f5fSAlex Deucher 	[0][1] = "MP1",
13402f23f5fSAlex Deucher 	[1][1] = "MP0",
13502f23f5fSAlex Deucher 	[2][1] = "HDP",
13602f23f5fSAlex Deucher 	[3][1] = "XDP",
13702f23f5fSAlex Deucher 	[6][1] = "DBGU0",
13802f23f5fSAlex Deucher 	[7][1] = "DCEDMC",
13902f23f5fSAlex Deucher 	[8][1] = "DCEVGA",
14002f23f5fSAlex Deucher 	[9][1] = "DCEDWB",
14102f23f5fSAlex Deucher 	[26][1] = "OSS",
14202f23f5fSAlex Deucher 	[27][1] = "SDMA0",
14302f23f5fSAlex Deucher 	[28][1] = "VCN",
14402f23f5fSAlex Deucher 	[29][1] = "VCNU",
14502f23f5fSAlex Deucher 	[30][1] = "JPEG",
14602f23f5fSAlex Deucher };
14702f23f5fSAlex Deucher 
14802f23f5fSAlex Deucher static const char *mmhub_client_ids_vega10[][2] = {
14902f23f5fSAlex Deucher 	[0][0] = "MP0",
15002f23f5fSAlex Deucher 	[1][0] = "UVD",
15102f23f5fSAlex Deucher 	[2][0] = "UVDU",
15202f23f5fSAlex Deucher 	[3][0] = "HDP",
15302f23f5fSAlex Deucher 	[13][0] = "UTCL2",
15402f23f5fSAlex Deucher 	[14][0] = "OSS",
15502f23f5fSAlex Deucher 	[15][0] = "SDMA1",
15602f23f5fSAlex Deucher 	[32+0][0] = "VCE0",
15702f23f5fSAlex Deucher 	[32+1][0] = "VCE0U",
15802f23f5fSAlex Deucher 	[32+2][0] = "XDMA",
15902f23f5fSAlex Deucher 	[32+3][0] = "DCE",
16002f23f5fSAlex Deucher 	[32+4][0] = "MP1",
16102f23f5fSAlex Deucher 	[32+14][0] = "SDMA0",
16202f23f5fSAlex Deucher 	[0][1] = "MP0",
16302f23f5fSAlex Deucher 	[1][1] = "UVD",
16402f23f5fSAlex Deucher 	[2][1] = "UVDU",
16502f23f5fSAlex Deucher 	[3][1] = "DBGU0",
16602f23f5fSAlex Deucher 	[4][1] = "HDP",
16702f23f5fSAlex Deucher 	[5][1] = "XDP",
16802f23f5fSAlex Deucher 	[14][1] = "OSS",
16902f23f5fSAlex Deucher 	[15][1] = "SDMA0",
17002f23f5fSAlex Deucher 	[32+0][1] = "VCE0",
17102f23f5fSAlex Deucher 	[32+1][1] = "VCE0U",
17202f23f5fSAlex Deucher 	[32+2][1] = "XDMA",
17302f23f5fSAlex Deucher 	[32+3][1] = "DCE",
17402f23f5fSAlex Deucher 	[32+4][1] = "DCEDWB",
17502f23f5fSAlex Deucher 	[32+5][1] = "MP1",
17602f23f5fSAlex Deucher 	[32+6][1] = "DBGU1",
17702f23f5fSAlex Deucher 	[32+14][1] = "SDMA1",
17802f23f5fSAlex Deucher };
17902f23f5fSAlex Deucher 
18002f23f5fSAlex Deucher static const char *mmhub_client_ids_vega12[][2] = {
18102f23f5fSAlex Deucher 	[0][0] = "MP0",
18202f23f5fSAlex Deucher 	[1][0] = "VCE0",
18302f23f5fSAlex Deucher 	[2][0] = "VCE0U",
18402f23f5fSAlex Deucher 	[3][0] = "HDP",
18502f23f5fSAlex Deucher 	[13][0] = "UTCL2",
18602f23f5fSAlex Deucher 	[14][0] = "OSS",
18702f23f5fSAlex Deucher 	[15][0] = "SDMA1",
18802f23f5fSAlex Deucher 	[32+0][0] = "DCE",
18902f23f5fSAlex Deucher 	[32+1][0] = "XDMA",
19002f23f5fSAlex Deucher 	[32+2][0] = "UVD",
19102f23f5fSAlex Deucher 	[32+3][0] = "UVDU",
19202f23f5fSAlex Deucher 	[32+4][0] = "MP1",
19302f23f5fSAlex Deucher 	[32+15][0] = "SDMA0",
19402f23f5fSAlex Deucher 	[0][1] = "MP0",
19502f23f5fSAlex Deucher 	[1][1] = "VCE0",
19602f23f5fSAlex Deucher 	[2][1] = "VCE0U",
19702f23f5fSAlex Deucher 	[3][1] = "DBGU0",
19802f23f5fSAlex Deucher 	[4][1] = "HDP",
19902f23f5fSAlex Deucher 	[5][1] = "XDP",
20002f23f5fSAlex Deucher 	[14][1] = "OSS",
20102f23f5fSAlex Deucher 	[15][1] = "SDMA0",
20202f23f5fSAlex Deucher 	[32+0][1] = "DCE",
20302f23f5fSAlex Deucher 	[32+1][1] = "DCEDWB",
20402f23f5fSAlex Deucher 	[32+2][1] = "XDMA",
20502f23f5fSAlex Deucher 	[32+3][1] = "UVD",
20602f23f5fSAlex Deucher 	[32+4][1] = "UVDU",
20702f23f5fSAlex Deucher 	[32+5][1] = "MP1",
20802f23f5fSAlex Deucher 	[32+6][1] = "DBGU1",
20902f23f5fSAlex Deucher 	[32+15][1] = "SDMA1",
21002f23f5fSAlex Deucher };
21102f23f5fSAlex Deucher 
21202f23f5fSAlex Deucher static const char *mmhub_client_ids_vega20[][2] = {
21302f23f5fSAlex Deucher 	[0][0] = "XDMA",
21402f23f5fSAlex Deucher 	[1][0] = "DCE",
21502f23f5fSAlex Deucher 	[2][0] = "VCE0",
21602f23f5fSAlex Deucher 	[3][0] = "VCE0U",
21702f23f5fSAlex Deucher 	[4][0] = "UVD",
21802f23f5fSAlex Deucher 	[5][0] = "UVD1U",
21902f23f5fSAlex Deucher 	[13][0] = "OSS",
22002f23f5fSAlex Deucher 	[14][0] = "HDP",
22102f23f5fSAlex Deucher 	[15][0] = "SDMA0",
22202f23f5fSAlex Deucher 	[32+0][0] = "UVD",
22302f23f5fSAlex Deucher 	[32+1][0] = "UVDU",
22402f23f5fSAlex Deucher 	[32+2][0] = "MP1",
22502f23f5fSAlex Deucher 	[32+3][0] = "MP0",
22602f23f5fSAlex Deucher 	[32+12][0] = "UTCL2",
22702f23f5fSAlex Deucher 	[32+14][0] = "SDMA1",
22802f23f5fSAlex Deucher 	[0][1] = "XDMA",
22902f23f5fSAlex Deucher 	[1][1] = "DCE",
23002f23f5fSAlex Deucher 	[2][1] = "DCEDWB",
23102f23f5fSAlex Deucher 	[3][1] = "VCE0",
23202f23f5fSAlex Deucher 	[4][1] = "VCE0U",
23302f23f5fSAlex Deucher 	[5][1] = "UVD1",
23402f23f5fSAlex Deucher 	[6][1] = "UVD1U",
23502f23f5fSAlex Deucher 	[7][1] = "DBGU0",
23602f23f5fSAlex Deucher 	[8][1] = "XDP",
23702f23f5fSAlex Deucher 	[13][1] = "OSS",
23802f23f5fSAlex Deucher 	[14][1] = "HDP",
23902f23f5fSAlex Deucher 	[15][1] = "SDMA0",
24002f23f5fSAlex Deucher 	[32+0][1] = "UVD",
24102f23f5fSAlex Deucher 	[32+1][1] = "UVDU",
24202f23f5fSAlex Deucher 	[32+2][1] = "DBGU1",
24302f23f5fSAlex Deucher 	[32+3][1] = "MP1",
24402f23f5fSAlex Deucher 	[32+4][1] = "MP0",
24502f23f5fSAlex Deucher 	[32+14][1] = "SDMA1",
24602f23f5fSAlex Deucher };
24702f23f5fSAlex Deucher 
24802f23f5fSAlex Deucher static const char *mmhub_client_ids_arcturus[][2] = {
249e83db774SAlex Deucher 	[0][0] = "DBGU1",
250e83db774SAlex Deucher 	[1][0] = "XDP",
25102f23f5fSAlex Deucher 	[2][0] = "MP1",
25202f23f5fSAlex Deucher 	[14][0] = "HDP",
253e83db774SAlex Deucher 	[171][0] = "JPEG",
254e83db774SAlex Deucher 	[172][0] = "VCN",
255e83db774SAlex Deucher 	[173][0] = "VCNU",
256e83db774SAlex Deucher 	[203][0] = "JPEG1",
257e83db774SAlex Deucher 	[204][0] = "VCN1",
258e83db774SAlex Deucher 	[205][0] = "VCN1U",
259e83db774SAlex Deucher 	[256][0] = "SDMA0",
260e83db774SAlex Deucher 	[257][0] = "SDMA1",
261e83db774SAlex Deucher 	[258][0] = "SDMA2",
262e83db774SAlex Deucher 	[259][0] = "SDMA3",
263e83db774SAlex Deucher 	[260][0] = "SDMA4",
264e83db774SAlex Deucher 	[261][0] = "SDMA5",
265e83db774SAlex Deucher 	[262][0] = "SDMA6",
266e83db774SAlex Deucher 	[263][0] = "SDMA7",
267e83db774SAlex Deucher 	[384][0] = "OSS",
26802f23f5fSAlex Deucher 	[0][1] = "DBGU1",
26902f23f5fSAlex Deucher 	[1][1] = "XDP",
27002f23f5fSAlex Deucher 	[2][1] = "MP1",
27102f23f5fSAlex Deucher 	[14][1] = "HDP",
272e83db774SAlex Deucher 	[171][1] = "JPEG",
273e83db774SAlex Deucher 	[172][1] = "VCN",
274e83db774SAlex Deucher 	[173][1] = "VCNU",
275e83db774SAlex Deucher 	[203][1] = "JPEG1",
276e83db774SAlex Deucher 	[204][1] = "VCN1",
277e83db774SAlex Deucher 	[205][1] = "VCN1U",
278e83db774SAlex Deucher 	[256][1] = "SDMA0",
279e83db774SAlex Deucher 	[257][1] = "SDMA1",
280e83db774SAlex Deucher 	[258][1] = "SDMA2",
281e83db774SAlex Deucher 	[259][1] = "SDMA3",
282e83db774SAlex Deucher 	[260][1] = "SDMA4",
283e83db774SAlex Deucher 	[261][1] = "SDMA5",
284e83db774SAlex Deucher 	[262][1] = "SDMA6",
285e83db774SAlex Deucher 	[263][1] = "SDMA7",
286e83db774SAlex Deucher 	[384][1] = "OSS",
28702f23f5fSAlex Deucher };
288e60f8db5SAlex Xie 
289e844cd99SAlex Deucher static const char *mmhub_client_ids_aldebaran[][2] = {
290e844cd99SAlex Deucher 	[2][0] = "MP1",
291e844cd99SAlex Deucher 	[3][0] = "MP0",
292f4ec3e50SAlex Sierra 	[32+1][0] = "DBGU_IO0",
293f4ec3e50SAlex Sierra 	[32+2][0] = "DBGU_IO2",
294e844cd99SAlex Deucher 	[32+4][0] = "MPIO",
295e844cd99SAlex Deucher 	[96+11][0] = "JPEG0",
296e844cd99SAlex Deucher 	[96+12][0] = "VCN0",
297e844cd99SAlex Deucher 	[96+13][0] = "VCNU0",
298e844cd99SAlex Deucher 	[128+11][0] = "JPEG1",
299e844cd99SAlex Deucher 	[128+12][0] = "VCN1",
300e844cd99SAlex Deucher 	[128+13][0] = "VCNU1",
301f4ec3e50SAlex Sierra 	[160+1][0] = "XDP",
302e844cd99SAlex Deucher 	[160+14][0] = "HDP",
303f4ec3e50SAlex Sierra 	[256+0][0] = "SDMA0",
304f4ec3e50SAlex Sierra 	[256+1][0] = "SDMA1",
305f4ec3e50SAlex Sierra 	[256+2][0] = "SDMA2",
306f4ec3e50SAlex Sierra 	[256+3][0] = "SDMA3",
307f4ec3e50SAlex Sierra 	[256+4][0] = "SDMA4",
308f4ec3e50SAlex Sierra 	[384+0][0] = "OSS",
309e844cd99SAlex Deucher 	[2][1] = "MP1",
310e844cd99SAlex Deucher 	[3][1] = "MP0",
311e844cd99SAlex Deucher 	[32+1][1] = "DBGU_IO0",
312e844cd99SAlex Deucher 	[32+2][1] = "DBGU_IO2",
313e844cd99SAlex Deucher 	[32+4][1] = "MPIO",
314e844cd99SAlex Deucher 	[96+11][1] = "JPEG0",
315e844cd99SAlex Deucher 	[96+12][1] = "VCN0",
316e844cd99SAlex Deucher 	[96+13][1] = "VCNU0",
317e844cd99SAlex Deucher 	[128+11][1] = "JPEG1",
318e844cd99SAlex Deucher 	[128+12][1] = "VCN1",
319e844cd99SAlex Deucher 	[128+13][1] = "VCNU1",
320f4ec3e50SAlex Sierra 	[160+1][1] = "XDP",
321e844cd99SAlex Deucher 	[160+14][1] = "HDP",
322f4ec3e50SAlex Sierra 	[256+0][1] = "SDMA0",
323f4ec3e50SAlex Sierra 	[256+1][1] = "SDMA1",
324f4ec3e50SAlex Sierra 	[256+2][1] = "SDMA2",
325f4ec3e50SAlex Sierra 	[256+3][1] = "SDMA3",
326f4ec3e50SAlex Sierra 	[256+4][1] = "SDMA4",
327f4ec3e50SAlex Sierra 	[384+0][1] = "OSS",
328e844cd99SAlex Deucher };
329e844cd99SAlex Deucher 
330946a4d5bSShaoyun Liu static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
3315c583018SEvan Quan {
332946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
333946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
3345c583018SEvan Quan };
3355c583018SEvan Quan 
336946a4d5bSShaoyun Liu static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
3375c583018SEvan Quan {
338946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
339946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
3405c583018SEvan Quan };
3415c583018SEvan Quan 
342791c4769Sxinhui pan static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
343791c4769Sxinhui pan 	(0x000143c0 + 0x00000000),
344791c4769Sxinhui pan 	(0x000143c0 + 0x00000800),
345791c4769Sxinhui pan 	(0x000143c0 + 0x00001000),
346791c4769Sxinhui pan 	(0x000143c0 + 0x00001800),
347791c4769Sxinhui pan 	(0x000543c0 + 0x00000000),
348791c4769Sxinhui pan 	(0x000543c0 + 0x00000800),
349791c4769Sxinhui pan 	(0x000543c0 + 0x00001000),
350791c4769Sxinhui pan 	(0x000543c0 + 0x00001800),
351791c4769Sxinhui pan 	(0x000943c0 + 0x00000000),
352791c4769Sxinhui pan 	(0x000943c0 + 0x00000800),
353791c4769Sxinhui pan 	(0x000943c0 + 0x00001000),
354791c4769Sxinhui pan 	(0x000943c0 + 0x00001800),
355791c4769Sxinhui pan 	(0x000d43c0 + 0x00000000),
356791c4769Sxinhui pan 	(0x000d43c0 + 0x00000800),
357791c4769Sxinhui pan 	(0x000d43c0 + 0x00001000),
358791c4769Sxinhui pan 	(0x000d43c0 + 0x00001800),
359791c4769Sxinhui pan 	(0x001143c0 + 0x00000000),
360791c4769Sxinhui pan 	(0x001143c0 + 0x00000800),
361791c4769Sxinhui pan 	(0x001143c0 + 0x00001000),
362791c4769Sxinhui pan 	(0x001143c0 + 0x00001800),
363791c4769Sxinhui pan 	(0x001543c0 + 0x00000000),
364791c4769Sxinhui pan 	(0x001543c0 + 0x00000800),
365791c4769Sxinhui pan 	(0x001543c0 + 0x00001000),
366791c4769Sxinhui pan 	(0x001543c0 + 0x00001800),
367791c4769Sxinhui pan 	(0x001943c0 + 0x00000000),
368791c4769Sxinhui pan 	(0x001943c0 + 0x00000800),
369791c4769Sxinhui pan 	(0x001943c0 + 0x00001000),
370791c4769Sxinhui pan 	(0x001943c0 + 0x00001800),
371791c4769Sxinhui pan 	(0x001d43c0 + 0x00000000),
372791c4769Sxinhui pan 	(0x001d43c0 + 0x00000800),
373791c4769Sxinhui pan 	(0x001d43c0 + 0x00001000),
374791c4769Sxinhui pan 	(0x001d43c0 + 0x00001800),
37502bab923SDavid Panariti };
37602bab923SDavid Panariti 
377791c4769Sxinhui pan static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
378791c4769Sxinhui pan 	(0x000143e0 + 0x00000000),
379791c4769Sxinhui pan 	(0x000143e0 + 0x00000800),
380791c4769Sxinhui pan 	(0x000143e0 + 0x00001000),
381791c4769Sxinhui pan 	(0x000143e0 + 0x00001800),
382791c4769Sxinhui pan 	(0x000543e0 + 0x00000000),
383791c4769Sxinhui pan 	(0x000543e0 + 0x00000800),
384791c4769Sxinhui pan 	(0x000543e0 + 0x00001000),
385791c4769Sxinhui pan 	(0x000543e0 + 0x00001800),
386791c4769Sxinhui pan 	(0x000943e0 + 0x00000000),
387791c4769Sxinhui pan 	(0x000943e0 + 0x00000800),
388791c4769Sxinhui pan 	(0x000943e0 + 0x00001000),
389791c4769Sxinhui pan 	(0x000943e0 + 0x00001800),
390791c4769Sxinhui pan 	(0x000d43e0 + 0x00000000),
391791c4769Sxinhui pan 	(0x000d43e0 + 0x00000800),
392791c4769Sxinhui pan 	(0x000d43e0 + 0x00001000),
393791c4769Sxinhui pan 	(0x000d43e0 + 0x00001800),
394791c4769Sxinhui pan 	(0x001143e0 + 0x00000000),
395791c4769Sxinhui pan 	(0x001143e0 + 0x00000800),
396791c4769Sxinhui pan 	(0x001143e0 + 0x00001000),
397791c4769Sxinhui pan 	(0x001143e0 + 0x00001800),
398791c4769Sxinhui pan 	(0x001543e0 + 0x00000000),
399791c4769Sxinhui pan 	(0x001543e0 + 0x00000800),
400791c4769Sxinhui pan 	(0x001543e0 + 0x00001000),
401791c4769Sxinhui pan 	(0x001543e0 + 0x00001800),
402791c4769Sxinhui pan 	(0x001943e0 + 0x00000000),
403791c4769Sxinhui pan 	(0x001943e0 + 0x00000800),
404791c4769Sxinhui pan 	(0x001943e0 + 0x00001000),
405791c4769Sxinhui pan 	(0x001943e0 + 0x00001800),
406791c4769Sxinhui pan 	(0x001d43e0 + 0x00000000),
407791c4769Sxinhui pan 	(0x001d43e0 + 0x00000800),
408791c4769Sxinhui pan 	(0x001d43e0 + 0x00001000),
409791c4769Sxinhui pan 	(0x001d43e0 + 0x00001800),
41002bab923SDavid Panariti };
41102bab923SDavid Panariti 
412791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
413791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
414791c4769Sxinhui pan 		unsigned type,
415791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
416791c4769Sxinhui pan {
417791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
418791c4769Sxinhui pan 
4191e2c6d55SJohn Clements 	/* Devices newer than VEGA10/12 shall have these programming
4201e2c6d55SJohn Clements 	 * sequences performed by the PSP bootloader */
4211e2c6d55SJohn Clements 	if (adev->asic_type >= CHIP_VEGA20)
4221e2c6d55SJohn Clements 		return 0;
4231e2c6d55SJohn Clements 
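	/* 0x7f is the set of bits toggled in every UMC ECC control/mask
	 * register below, presumably the per-channel ECC interrupt enables.
	 */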
424791c4769Sxinhui pan 	bits = 0x7f;
425791c4769Sxinhui pan 
426791c4769Sxinhui pan 	switch (state) {
427791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
428791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
429791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
430791c4769Sxinhui pan 			tmp = RREG32(reg);
431791c4769Sxinhui pan 			tmp &= ~bits;
432791c4769Sxinhui pan 			WREG32(reg, tmp);
433791c4769Sxinhui pan 		}
434791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
435791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
436791c4769Sxinhui pan 			tmp = RREG32(reg);
437791c4769Sxinhui pan 			tmp &= ~bits;
438791c4769Sxinhui pan 			WREG32(reg, tmp);
439791c4769Sxinhui pan 		}
440791c4769Sxinhui pan 		break;
441791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
442791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
443791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
444791c4769Sxinhui pan 			tmp = RREG32(reg);
445791c4769Sxinhui pan 			tmp |= bits;
446791c4769Sxinhui pan 			WREG32(reg, tmp);
447791c4769Sxinhui pan 		}
448791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
449791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
450791c4769Sxinhui pan 			tmp = RREG32(reg);
451791c4769Sxinhui pan 			tmp |= bits;
452791c4769Sxinhui pan 			WREG32(reg, tmp);
453791c4769Sxinhui pan 		}
454791c4769Sxinhui pan 		break;
455791c4769Sxinhui pan 	default:
456791c4769Sxinhui pan 		break;
457791c4769Sxinhui pan 	}
458791c4769Sxinhui pan 
459791c4769Sxinhui pan 	return 0;
460791c4769Sxinhui pan }
461791c4769Sxinhui pan 
462e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
463e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
464e60f8db5SAlex Xie 					unsigned type,
465e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
466e60f8db5SAlex Xie {
467e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
468ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
469e60f8db5SAlex Xie 
47011250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47111250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47211250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47311250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47411250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47511250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47611250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
47711250164SChristian König 
478e60f8db5SAlex Xie 	switch (state) {
479e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
4801daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
481ae6d1416STom St Denis 			hub = &adev->vmhub[j];
482e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
483e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
48492f153bbSVictor Skvortsov 
48592f153bbSVictor Skvortsov 				if (j == AMDGPU_GFXHUB_0)
48692f153bbSVictor Skvortsov 					tmp = RREG32_SOC15_IP(GC, reg);
48792f153bbSVictor Skvortsov 				else
48892f153bbSVictor Skvortsov 					tmp = RREG32_SOC15_IP(MMHUB, reg);
48992f153bbSVictor Skvortsov 
490e60f8db5SAlex Xie 				tmp &= ~bits;
49192f153bbSVictor Skvortsov 
49292f153bbSVictor Skvortsov 				if (j == AMDGPU_GFXHUB_0)
49392f153bbSVictor Skvortsov 					WREG32_SOC15_IP(GC, reg, tmp);
49492f153bbSVictor Skvortsov 				else
49592f153bbSVictor Skvortsov 					WREG32_SOC15_IP(MMHUB, reg, tmp);
496e60f8db5SAlex Xie 			}
497e60f8db5SAlex Xie 		}
498e60f8db5SAlex Xie 		break;
499e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
5001daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
501ae6d1416STom St Denis 			hub = &adev->vmhub[j];
502e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
503e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
50492f153bbSVictor Skvortsov 
50592f153bbSVictor Skvortsov 				if (j == AMDGPU_GFXHUB_0)
50692f153bbSVictor Skvortsov 					tmp = RREG32_SOC15_IP(GC, reg);
50792f153bbSVictor Skvortsov 				else
50892f153bbSVictor Skvortsov 					tmp = RREG32_SOC15_IP(MMHUB, reg);
50992f153bbSVictor Skvortsov 
510e60f8db5SAlex Xie 				tmp |= bits;
51192f153bbSVictor Skvortsov 
51292f153bbSVictor Skvortsov 				if (j == AMDGPU_GFXHUB_0)
51392f153bbSVictor Skvortsov 					WREG32_SOC15_IP(GC, reg, tmp);
51492f153bbSVictor Skvortsov 				else
51592f153bbSVictor Skvortsov 					WREG32_SOC15_IP(MMHUB, reg, tmp);
516e60f8db5SAlex Xie 			}
517e60f8db5SAlex Xie 		}
5189304ca4dSGustavo A. R. Silva 		break;
519e60f8db5SAlex Xie 	default:
520e60f8db5SAlex Xie 		break;
521e60f8db5SAlex Xie 	}
522e60f8db5SAlex Xie 
523e60f8db5SAlex Xie 	return 0;
524e60f8db5SAlex Xie }
525e60f8db5SAlex Xie 
526e60f8db5SAlex Xie static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
527e60f8db5SAlex Xie 				      struct amdgpu_irq_src *source,
528e60f8db5SAlex Xie 				      struct amdgpu_iv_entry *entry)
529e60f8db5SAlex Xie {
530c468f9e2SChristian König 	bool retry_fault = !!(entry->src_data[1] & 0x80);
531ff891a2eSPhilip Yang 	bool write_fault = !!(entry->src_data[1] & 0x20);
53202f23f5fSAlex Deucher 	uint32_t status = 0, cid = 0, rw = 0;
533e3898719SChristian König 	struct amdgpu_task_info task_info;
534e3898719SChristian König 	struct amdgpu_vmhub *hub;
53502f23f5fSAlex Deucher 	const char *mmhub_cid;
536e3898719SChristian König 	const char *hub_name;
537e3898719SChristian König 	u64 addr;
538e60f8db5SAlex Xie 
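	/* Reassemble the page-aligned fault address from the IV payload:
	 * src_data[0] carries address bits 43:12 and the low nibble of
	 * src_data[1] carries bits 47:44.
	 */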
539e60f8db5SAlex Xie 	addr = (u64)entry->src_data[0] << 12;
540e60f8db5SAlex Xie 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
541e60f8db5SAlex Xie 
5420291150dSChristian König 	if (retry_fault) {
5430291150dSChristian König 		/* Returning 1 here also prevents sending the IV to the KFD */
54422666cc1SChristian König 
5450291150dSChristian König 		/* Process it only if it's the first fault for this address */
5460291150dSChristian König 		if (entry->ih != &adev->irq.ih_soft &&
5473c2d6ea2SPhilip Yang 		    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
5480291150dSChristian König 					     entry->timestamp))
5490291150dSChristian König 			return 1;
5500291150dSChristian König 
5510291150dSChristian König 		/* Delegate it to a different ring if the hardware hasn't
5520291150dSChristian König 		 * already done it.
5530291150dSChristian König 		 */
55458df0d71SSebastian Andrzej Siewior 		if (entry->ih == &adev->irq.ih) {
5550291150dSChristian König 			amdgpu_irq_delegate(adev, entry, 8);
5560291150dSChristian König 			return 1;
5570291150dSChristian König 		}
5580291150dSChristian König 
5590291150dSChristian König 		/* Try to handle the recoverable page faults by filling page
5600291150dSChristian König 		 * tables
5610291150dSChristian König 		 */
562ff891a2eSPhilip Yang 		if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
5630291150dSChristian König 			return 1;
5640291150dSChristian König 	}
565ec671737SChristian König 
566e3898719SChristian König 	if (!printk_ratelimit())
567e3898719SChristian König 		return 0;
56853499173SXiaojie Yuan 
569e3898719SChristian König 	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
570e3898719SChristian König 		hub_name = "mmhub0";
571e3898719SChristian König 		hub = &adev->vmhub[AMDGPU_MMHUB_0];
572e3898719SChristian König 	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
573e3898719SChristian König 		hub_name = "mmhub1";
574e3898719SChristian König 		hub = &adev->vmhub[AMDGPU_MMHUB_1];
575e3898719SChristian König 	} else {
576e3898719SChristian König 		hub_name = "gfxhub0";
577e3898719SChristian König 		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
5784d6cbde3SFelix Kuehling 	}
579e60f8db5SAlex Xie 
58005794effSShirish S 	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
581efaa9646SAndrey Grodzovsky 	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
582efaa9646SAndrey Grodzovsky 
5834d6cbde3SFelix Kuehling 	dev_err(adev->dev,
584c468f9e2SChristian König 		"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
585c468f9e2SChristian König 		"pasid:%u, for process %s pid %d thread %s pid %d)\n",
58651c60898SLe Ma 		hub_name, retry_fault ? "retry" : "no-retry",
587c4f46f22SChristian König 		entry->src_id, entry->ring_id, entry->vmid,
588efaa9646SAndrey Grodzovsky 		entry->pasid, task_info.process_name, task_info.tgid,
589efaa9646SAndrey Grodzovsky 		task_info.task_name, task_info.pid);
590be14729aSYong Zhao 	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
591be14729aSYong Zhao 		addr, entry->client_id,
592be14729aSYong Zhao 		soc15_ih_clientid_name[entry->client_id]);
593e3898719SChristian König 
594e3898719SChristian König 	if (amdgpu_sriov_vf(adev))
595e3898719SChristian König 		return 0;
596e3898719SChristian König 
597e3898719SChristian König 	/*
598e3898719SChristian König 	 * Issue a dummy read to wait for the status register to
599e3898719SChristian König 	 * be updated to avoid reading an incorrect value due to
600e3898719SChristian König 	 * the new fast GRBM interface.
601e3898719SChristian König 	 */
6027845d80dSAlex Deucher 	if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
603630e959fSAlex Deucher 	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
604e3898719SChristian König 		RREG32(hub->vm_l2_pro_fault_status);
605e3898719SChristian König 
606e3898719SChristian König 	status = RREG32(hub->vm_l2_pro_fault_status);
607e3898719SChristian König 	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
608e3898719SChristian König 	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
609e3898719SChristian König 	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
610e3898719SChristian König 
611e3898719SChristian König 
6124d6cbde3SFelix Kuehling 	dev_err(adev->dev,
6134d6cbde3SFelix Kuehling 		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
6144d6cbde3SFelix Kuehling 		status);
61502f23f5fSAlex Deucher 	if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
616be99ecbfSAlex Deucher 		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
617e3898719SChristian König 			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
618e3898719SChristian König 			gfxhub_client_ids[cid],
619be99ecbfSAlex Deucher 			cid);
62002f23f5fSAlex Deucher 	} else {
621630e959fSAlex Deucher 		switch (adev->ip_versions[MMHUB_HWIP][0]) {
622630e959fSAlex Deucher 		case IP_VERSION(9, 0, 0):
62302f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
62402f23f5fSAlex Deucher 			break;
625630e959fSAlex Deucher 		case IP_VERSION(9, 3, 0):
62602f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
62702f23f5fSAlex Deucher 			break;
628630e959fSAlex Deucher 		case IP_VERSION(9, 4, 0):
62902f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
63002f23f5fSAlex Deucher 			break;
631630e959fSAlex Deucher 		case IP_VERSION(9, 4, 1):
63202f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
63302f23f5fSAlex Deucher 			break;
634630e959fSAlex Deucher 		case IP_VERSION(9, 1, 0):
635630e959fSAlex Deucher 		case IP_VERSION(9, 2, 0):
63602f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_raven[cid][rw];
63702f23f5fSAlex Deucher 			break;
638630e959fSAlex Deucher 		case IP_VERSION(1, 5, 0):
639630e959fSAlex Deucher 		case IP_VERSION(2, 4, 0):
64002f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
64102f23f5fSAlex Deucher 			break;
642630e959fSAlex Deucher 		case IP_VERSION(9, 4, 2):
643e844cd99SAlex Deucher 			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
644e844cd99SAlex Deucher 			break;
64502f23f5fSAlex Deucher 		default:
64602f23f5fSAlex Deucher 			mmhub_cid = NULL;
64702f23f5fSAlex Deucher 			break;
64802f23f5fSAlex Deucher 		}
64902f23f5fSAlex Deucher 		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
65002f23f5fSAlex Deucher 			mmhub_cid ? mmhub_cid : "unknown", cid);
65102f23f5fSAlex Deucher 	}
6525ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
6535ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
6545ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
6555ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
6565ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
6575ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
6585ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
6595ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
6605ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
6615ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
6625ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
6635ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
66402f23f5fSAlex Deucher 	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
665e60f8db5SAlex Xie 	return 0;
666e60f8db5SAlex Xie }
667e60f8db5SAlex Xie 
668e60f8db5SAlex Xie static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
669e60f8db5SAlex Xie 	.set = gmc_v9_0_vm_fault_interrupt_state,
670e60f8db5SAlex Xie 	.process = gmc_v9_0_process_interrupt,
671e60f8db5SAlex Xie };
672e60f8db5SAlex Xie 
673791c4769Sxinhui pan 
674791c4769Sxinhui pan static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
675791c4769Sxinhui pan 	.set = gmc_v9_0_ecc_interrupt_state,
67634cc4fd9STao Zhou 	.process = amdgpu_umc_process_ecc_irq,
677791c4769Sxinhui pan };
678791c4769Sxinhui pan 
679e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
680e60f8db5SAlex Xie {
681770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
682770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
683791c4769Sxinhui pan 
68468d705ddSHawking Zhang 	if (!amdgpu_sriov_vf(adev) &&
68568d705ddSHawking Zhang 	    !adev->gmc.xgmi.connected_to_cpu) {
686791c4769Sxinhui pan 		adev->gmc.ecc_irq.num_types = 1;
687791c4769Sxinhui pan 		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
688e60f8db5SAlex Xie 	}
6892ee9403eSZhigang Luo }
690e60f8db5SAlex Xie 
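/*
 * Build the VM_INVALIDATE_ENG0_REQ value for the given vmid and flush type:
 * invalidate the L2 PTE, PDE0/1/2 and L1 PTE caches, without clearing the
 * protection fault status address.
 */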
6912a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
6922a79d868SYong Zhao 					uint32_t flush_type)
69303f89febSChristian König {
69403f89febSChristian König 	u32 req = 0;
69503f89febSChristian König 
69603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
697c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
6982a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
69903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
70003f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
70103f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
70203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
70303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
70403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
70503f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
70603f89febSChristian König 
70703f89febSChristian König 	return req;
70803f89febSChristian König }
70903f89febSChristian König 
71090f6452cSchangzhu /**
71190f6452cSchangzhu  * gmc_v9_0_use_invalidate_semaphore - decide whether to use the invalidation semaphore
71290f6452cSchangzhu  *
71390f6452cSchangzhu  * @adev: amdgpu_device pointer
71490f6452cSchangzhu  * @vmhub: vmhub type
71590f6452cSchangzhu  *
71690f6452cSchangzhu  */
71790f6452cSchangzhu static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
71890f6452cSchangzhu 				       uint32_t vmhub)
71990f6452cSchangzhu {
720630e959fSAlex Deucher 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
721d477c5aaSHawking Zhang 		return false;
722d477c5aaSHawking Zhang 
72390f6452cSchangzhu 	return ((vmhub == AMDGPU_MMHUB_0 ||
72490f6452cSchangzhu 		 vmhub == AMDGPU_MMHUB_1) &&
72590f6452cSchangzhu 		(!amdgpu_sriov_vf(adev)) &&
72654f78a76SAlex Deucher 		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
72754f78a76SAlex Deucher 		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
72890f6452cSchangzhu }
72990f6452cSchangzhu 
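/*
 * Read the ATC_VMID<vmid>_PASID_MAPPING register, return the mapped PASID
 * through p_pasid and report whether the mapping is marked valid.
 */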
730ea930000SAlex Sierra static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
731ea930000SAlex Sierra 					uint8_t vmid, uint16_t *p_pasid)
732ea930000SAlex Sierra {
733ea930000SAlex Sierra 	uint32_t value;
734ea930000SAlex Sierra 
735ea930000SAlex Sierra 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
736ea930000SAlex Sierra 		     + vmid);
737ea930000SAlex Sierra 	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
738ea930000SAlex Sierra 
739ea930000SAlex Sierra 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
740ea930000SAlex Sierra }
741ea930000SAlex Sierra 
742e60f8db5SAlex Xie /*
743e60f8db5SAlex Xie  * GART
744e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
745e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
746e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
747e60f8db5SAlex Xie  */
748e60f8db5SAlex Xie 
749e60f8db5SAlex Xie /**
7502a79d868SYong Zhao  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
751e60f8db5SAlex Xie  *
752e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
753e60f8db5SAlex Xie  * @vmid: vm instance to flush
754bf0df09cSLee Jones  * @vmhub: which hub to flush
7552a79d868SYong Zhao  * @flush_type: the flush type
756e60f8db5SAlex Xie  *
7572a79d868SYong Zhao  * Flush the TLB for the requested page table using the given flush type.
758e60f8db5SAlex Xie  */
7593ff98548SOak Zeng static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
7603ff98548SOak Zeng 					uint32_t vmhub, uint32_t flush_type)
761e60f8db5SAlex Xie {
76290f6452cSchangzhu 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
763e60f8db5SAlex Xie 	const unsigned eng = 17;
764b80cd524SFelix Kuehling 	u32 j, inv_req, inv_req2, tmp;
7653ff98548SOak Zeng 	struct amdgpu_vmhub *hub;
766e60f8db5SAlex Xie 
7673ff98548SOak Zeng 	BUG_ON(vmhub >= adev->num_vmhubs);
7683ff98548SOak Zeng 
7693ff98548SOak Zeng 	hub = &adev->vmhub[vmhub];
770b80cd524SFelix Kuehling 	if (adev->gmc.xgmi.num_physical_nodes &&
771630e959fSAlex Deucher 	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
772b80cd524SFelix Kuehling 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
773b80cd524SFelix Kuehling 		 * heavy-weight TLB flush (type 2), which flushes
774b80cd524SFelix Kuehling 		 * both. Due to a race condition with concurrent
775b80cd524SFelix Kuehling 		 * memory accesses using the same TLB cache line, we
776b80cd524SFelix Kuehling 		 * still need a second TLB flush after this.
777b80cd524SFelix Kuehling 		 */
778b80cd524SFelix Kuehling 		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
779b80cd524SFelix Kuehling 		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
780b80cd524SFelix Kuehling 	} else {
78137c58ddfSFelix Kuehling 		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
782b80cd524SFelix Kuehling 		inv_req2 = 0;
783b80cd524SFelix Kuehling 	}
784e60f8db5SAlex Xie 
78582d1a1b1SChengming Gui 	/* This is necessary for a HW workaround under SRIOV as well
78682d1a1b1SChengming Gui 	 * as GFXOFF under bare metal
78782d1a1b1SChengming Gui 	 */
78882d1a1b1SChengming Gui 	if (adev->gfx.kiq.ring.sched.ready &&
78982d1a1b1SChengming Gui 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
79081202807SDennis Li 	    down_read_trylock(&adev->reset_sem)) {
791148f597dSHuang Rui 		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
792148f597dSHuang Rui 		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
793af5fe1e9SChristian König 
79437c58ddfSFelix Kuehling 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
795af5fe1e9SChristian König 						   1 << vmid);
79681202807SDennis Li 		up_read(&adev->reset_sem);
7973ff98548SOak Zeng 		return;
798fc0faf04SEmily Deng 	}
7993890d111SEmily Deng 
8003890d111SEmily Deng 	spin_lock(&adev->gmc.invalidate_lock);
801f920d1bbSchangzhu 
802f920d1bbSchangzhu 	/*
803f920d1bbSchangzhu 	 * The GPUVM invalidate acknowledge state may be lost across a
804f920d1bbSchangzhu 	 * power-gating off cycle. Acquire the semaphore before the
805f920d1bbSchangzhu 	 * invalidation and release it afterwards to avoid entering a
806f920d1bbSchangzhu 	 * power-gated state, as a workaround for this issue.
807f920d1bbSchangzhu 	 */
808f920d1bbSchangzhu 
809f920d1bbSchangzhu 	/* TODO: the semaphore path still needs more debugging before it can be used for GFXHUB as well. */
81090f6452cSchangzhu 	if (use_semaphore) {
811f920d1bbSchangzhu 		for (j = 0; j < adev->usec_timeout; j++) {
81292f153bbSVictor Skvortsov 			/* a read return value of 1 means semaphore acquire */
81392f153bbSVictor Skvortsov 			if (vmhub == AMDGPU_GFXHUB_0)
81492f153bbSVictor Skvortsov 				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
81592f153bbSVictor Skvortsov 			else
81692f153bbSVictor Skvortsov 				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
81792f153bbSVictor Skvortsov 
818f920d1bbSchangzhu 			if (tmp & 0x1)
819f920d1bbSchangzhu 				break;
820f920d1bbSchangzhu 			udelay(1);
821f920d1bbSchangzhu 		}
822f920d1bbSchangzhu 
823f920d1bbSchangzhu 		if (j >= adev->usec_timeout)
824f920d1bbSchangzhu 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
825f920d1bbSchangzhu 	}
826f920d1bbSchangzhu 
827b80cd524SFelix Kuehling 	do {
82892f153bbSVictor Skvortsov 		if (vmhub == AMDGPU_GFXHUB_0)
82992f153bbSVictor Skvortsov 			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
83092f153bbSVictor Skvortsov 		else
83192f153bbSVictor Skvortsov 			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
83253499173SXiaojie Yuan 
83353499173SXiaojie Yuan 		/*
834b80cd524SFelix Kuehling 		 * Issue a dummy read to wait for the ACK register to
835b80cd524SFelix Kuehling 		 * be cleared to avoid a false ACK due to the new fast
836b80cd524SFelix Kuehling 		 * GRBM interface.
83753499173SXiaojie Yuan 		 */
8387845d80dSAlex Deucher 		if ((vmhub == AMDGPU_GFXHUB_0) &&
839630e959fSAlex Deucher 		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
840148f597dSHuang Rui 			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
841148f597dSHuang Rui 				      hub->eng_distance * eng);
84253499173SXiaojie Yuan 
843e60f8db5SAlex Xie 		for (j = 0; j < adev->usec_timeout; j++) {
84492f153bbSVictor Skvortsov 			if (vmhub == AMDGPU_GFXHUB_0)
84592f153bbSVictor Skvortsov 				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
84692f153bbSVictor Skvortsov 			else
84792f153bbSVictor Skvortsov 				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
84892f153bbSVictor Skvortsov 
849396557b0SChristian König 			if (tmp & (1 << vmid))
850e60f8db5SAlex Xie 				break;
851e60f8db5SAlex Xie 			udelay(1);
852e60f8db5SAlex Xie 		}
853f920d1bbSchangzhu 
854b80cd524SFelix Kuehling 		inv_req = inv_req2;
855b80cd524SFelix Kuehling 		inv_req2 = 0;
856b80cd524SFelix Kuehling 	} while (inv_req);
857b80cd524SFelix Kuehling 
858f920d1bbSchangzhu 	/* TODO: the semaphore path still needs more debugging before it can be used for GFXHUB as well. */
85992f153bbSVictor Skvortsov 	if (use_semaphore) {
860f920d1bbSchangzhu 		/*
861f920d1bbSchangzhu 		 * add semaphore release after invalidation,
862f920d1bbSchangzhu 		 * write with 0 means semaphore release
863f920d1bbSchangzhu 		 */
86492f153bbSVictor Skvortsov 		if (vmhub == AMDGPU_GFXHUB_0)
86592f153bbSVictor Skvortsov 			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
86692f153bbSVictor Skvortsov 		else
86792f153bbSVictor Skvortsov 			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
86892f153bbSVictor Skvortsov 	}
869f920d1bbSchangzhu 
8703890d111SEmily Deng 	spin_unlock(&adev->gmc.invalidate_lock);
871f920d1bbSchangzhu 
872396557b0SChristian König 	if (j < adev->usec_timeout)
8733ff98548SOak Zeng 		return;
874396557b0SChristian König 
875e60f8db5SAlex Xie 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
876e60f8db5SAlex Xie }
877e60f8db5SAlex Xie 
878ea930000SAlex Sierra /**
879ea930000SAlex Sierra  * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
880ea930000SAlex Sierra  *
881ea930000SAlex Sierra  * @adev: amdgpu_device pointer
882ea930000SAlex Sierra  * @pasid: pasid to be flushed
883bf0df09cSLee Jones  * @flush_type: the flush type
884bf0df09cSLee Jones  * @all_hub: flush all hubs
885ea930000SAlex Sierra  *
886ea930000SAlex Sierra  * Flush the TLB for the requested pasid.
887ea930000SAlex Sierra  */
888ea930000SAlex Sierra static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
889ea930000SAlex Sierra 					uint16_t pasid, uint32_t flush_type,
890ea930000SAlex Sierra 					bool all_hub)
891ea930000SAlex Sierra {
892ea930000SAlex Sierra 	int vmid, i;
893ea930000SAlex Sierra 	signed long r;
894ea930000SAlex Sierra 	uint32_t seq;
895ea930000SAlex Sierra 	uint16_t queried_pasid;
896ea930000SAlex Sierra 	bool ret;
897ea930000SAlex Sierra 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
898ea930000SAlex Sierra 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
899ea930000SAlex Sierra 
90053b3f8f4SDennis Li 	if (amdgpu_in_reset(adev))
901ea930000SAlex Sierra 		return -EIO;
902ea930000SAlex Sierra 
90381202807SDennis Li 	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
904b80cd524SFelix Kuehling 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
905b80cd524SFelix Kuehling 		 * heavy-weight TLB flush (type 2), which flushes
906b80cd524SFelix Kuehling 		 * both. Due to a race condition with concurrent
907b80cd524SFelix Kuehling 		 * memory accesses using the same TLB cache line, we
908b80cd524SFelix Kuehling 		 * still need a second TLB flush after this.
909b80cd524SFelix Kuehling 		 */
910b80cd524SFelix Kuehling 		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
911630e959fSAlex Deucher 				       adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
912b80cd524SFelix Kuehling 		/* 2 dwords flush + 8 dwords fence */
913b80cd524SFelix Kuehling 		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
914b80cd524SFelix Kuehling 
915b80cd524SFelix Kuehling 		if (vega20_xgmi_wa)
916b80cd524SFelix Kuehling 			ndw += kiq->pmf->invalidate_tlbs_size;
917b80cd524SFelix Kuehling 
918ea930000SAlex Sierra 		spin_lock(&adev->gfx.kiq.ring_lock);
91936a1707aSAlex Sierra 		/* 2 dwords flush + 8 dwords fence */
920b80cd524SFelix Kuehling 		amdgpu_ring_alloc(ring, ndw);
921b80cd524SFelix Kuehling 		if (vega20_xgmi_wa)
922b80cd524SFelix Kuehling 			kiq->pmf->kiq_invalidate_tlbs(ring,
923b80cd524SFelix Kuehling 						      pasid, 2, all_hub);
924ea930000SAlex Sierra 		kiq->pmf->kiq_invalidate_tlbs(ring,
925ea930000SAlex Sierra 					pasid, flush_type, all_hub);
92604e4e2e9SYintian Tao 		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
92704e4e2e9SYintian Tao 		if (r) {
92804e4e2e9SYintian Tao 			amdgpu_ring_undo(ring);
929abb17b1eSColin Ian King 			spin_unlock(&adev->gfx.kiq.ring_lock);
93081202807SDennis Li 			up_read(&adev->reset_sem);
93104e4e2e9SYintian Tao 			return -ETIME;
93204e4e2e9SYintian Tao 		}
93304e4e2e9SYintian Tao 
934ea930000SAlex Sierra 		amdgpu_ring_commit(ring);
935ea930000SAlex Sierra 		spin_unlock(&adev->gfx.kiq.ring_lock);
936ea930000SAlex Sierra 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
937ea930000SAlex Sierra 		if (r < 1) {
938aac89168SDennis Li 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
93981202807SDennis Li 			up_read(&adev->reset_sem);
940ea930000SAlex Sierra 			return -ETIME;
941ea930000SAlex Sierra 		}
94281202807SDennis Li 		up_read(&adev->reset_sem);
943ea930000SAlex Sierra 		return 0;
944ea930000SAlex Sierra 	}
945ea930000SAlex Sierra 
946ea930000SAlex Sierra 	for (vmid = 1; vmid < 16; vmid++) {
947ea930000SAlex Sierra 
948ea930000SAlex Sierra 		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
949ea930000SAlex Sierra 				&queried_pasid);
950ea930000SAlex Sierra 		if (ret && queried_pasid == pasid) {
951ea930000SAlex Sierra 			if (all_hub) {
952ea930000SAlex Sierra 				for (i = 0; i < adev->num_vmhubs; i++)
953ea930000SAlex Sierra 					gmc_v9_0_flush_gpu_tlb(adev, vmid,
954fa34edbeSFelix Kuehling 							i, flush_type);
955ea930000SAlex Sierra 			} else {
956ea930000SAlex Sierra 				gmc_v9_0_flush_gpu_tlb(adev, vmid,
957fa34edbeSFelix Kuehling 						AMDGPU_GFXHUB_0, flush_type);
958ea930000SAlex Sierra 			}
959ea930000SAlex Sierra 			break;
960ea930000SAlex Sierra 		}
961ea930000SAlex Sierra 	}
962ea930000SAlex Sierra 
963ea930000SAlex Sierra 	return 0;
964ea930000SAlex Sierra 
965ea930000SAlex Sierra }
966ea930000SAlex Sierra 
9679096d6e5SChristian König static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
968c633c00bSChristian König 					    unsigned vmid, uint64_t pd_addr)
9699096d6e5SChristian König {
97090f6452cSchangzhu 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
971250b4228SChristian König 	struct amdgpu_device *adev = ring->adev;
972250b4228SChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
9732a79d868SYong Zhao 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
9749096d6e5SChristian König 	unsigned eng = ring->vm_inv_eng;
9759096d6e5SChristian König 
976f920d1bbSchangzhu 	/*
977f920d1bbSchangzhu 	 * The GPUVM invalidate acknowledge state may be lost across a
978f920d1bbSchangzhu 	 * power-gating off cycle. Acquire the semaphore before the
979f920d1bbSchangzhu 	 * invalidation and release it afterwards to avoid entering a
980f920d1bbSchangzhu 	 * power-gated state, as a workaround for this issue.
981f920d1bbSchangzhu 	 */
982f920d1bbSchangzhu 
983f920d1bbSchangzhu 	/* TODO: the semaphore path still needs more debugging before it can be used for GFXHUB as well. */
98490f6452cSchangzhu 	if (use_semaphore)
985f920d1bbSchangzhu 		/* a read return value of 1 means semaphore acquire */
986f920d1bbSchangzhu 		amdgpu_ring_emit_reg_wait(ring,
987148f597dSHuang Rui 					  hub->vm_inv_eng0_sem +
988148f597dSHuang Rui 					  hub->eng_distance * eng, 0x1, 0x1);
989f920d1bbSchangzhu 
990148f597dSHuang Rui 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
991148f597dSHuang Rui 			      (hub->ctx_addr_distance * vmid),
9929096d6e5SChristian König 			      lower_32_bits(pd_addr));
9939096d6e5SChristian König 
994148f597dSHuang Rui 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
995148f597dSHuang Rui 			      (hub->ctx_addr_distance * vmid),
9969096d6e5SChristian König 			      upper_32_bits(pd_addr));
9979096d6e5SChristian König 
998148f597dSHuang Rui 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
999148f597dSHuang Rui 					    hub->eng_distance * eng,
1000148f597dSHuang Rui 					    hub->vm_inv_eng0_ack +
1001148f597dSHuang Rui 					    hub->eng_distance * eng,
1002f8bc9037SAlex Deucher 					    req, 1 << vmid);
1003f732b6b3SChristian König 
1004f920d1bbSchangzhu 	/* TODO: the semaphore path still needs more debugging before it can be used for GFXHUB as well. */
100590f6452cSchangzhu 	if (use_semaphore)
1006f920d1bbSchangzhu 		/*
1007f920d1bbSchangzhu 		 * add semaphore release after invalidation,
1008f920d1bbSchangzhu 		 * write with 0 means semaphore release
1009f920d1bbSchangzhu 		 */
1010148f597dSHuang Rui 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
1011148f597dSHuang Rui 				      hub->eng_distance * eng, 0);
1012f920d1bbSchangzhu 
10139096d6e5SChristian König 	return pd_addr;
10149096d6e5SChristian König }
10159096d6e5SChristian König 
1016c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
1017c633c00bSChristian König 					unsigned pasid)
1018c633c00bSChristian König {
1019c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
1020c633c00bSChristian König 	uint32_t reg;
1021c633c00bSChristian König 
1022f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
1023f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
1024f2d66571SLe Ma 		return;
1025f2d66571SLe Ma 
1026a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
1027c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
1028c633c00bSChristian König 	else
1029c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
1030c633c00bSChristian König 
1031c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
1032c633c00bSChristian König }
1033c633c00bSChristian König 
1034e60f8db5SAlex Xie /*
1035e60f8db5SAlex Xie  * PTE format on VEGA 10:
1036e60f8db5SAlex Xie  * 63:59 reserved
1037e60f8db5SAlex Xie  * 58:57 mtype
1038e60f8db5SAlex Xie  * 56 F
1039e60f8db5SAlex Xie  * 55 L
1040e60f8db5SAlex Xie  * 54 P
1041e60f8db5SAlex Xie  * 53 SW
1042e60f8db5SAlex Xie  * 52 T
1043e60f8db5SAlex Xie  * 50:48 reserved
1044e60f8db5SAlex Xie  * 47:12 4k physical page base address
1045e60f8db5SAlex Xie  * 11:7 fragment
1046e60f8db5SAlex Xie  * 6 write
1047e60f8db5SAlex Xie  * 5 read
1048e60f8db5SAlex Xie  * 4 exe
1049e60f8db5SAlex Xie  * 3 Z
1050e60f8db5SAlex Xie  * 2 snooped
1051e60f8db5SAlex Xie  * 1 system
1052e60f8db5SAlex Xie  * 0 valid
1053e60f8db5SAlex Xie  *
1054e60f8db5SAlex Xie  * PDE format on VEGA 10:
1055e60f8db5SAlex Xie  * 63:59 block fragment size
1056e60f8db5SAlex Xie  * 58:55 reserved
1057e60f8db5SAlex Xie  * 54 P
1058e60f8db5SAlex Xie  * 53:48 reserved
1059e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
1060e60f8db5SAlex Xie  * 5:3 reserved
1061e60f8db5SAlex Xie  * 2 C
1062e60f8db5SAlex Xie  * 1 system
1063e60f8db5SAlex Xie  * 0 valid
1064e60f8db5SAlex Xie  */
1065e60f8db5SAlex Xie 
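/*
 * Translate an AMDGPU_VM_MTYPE_* mapping flag into the Vega10 PTE MTYPE
 * encoding; anything unrecognized falls back to MTYPE_NC.
 */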
106671776b6dSChristian König static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1067e60f8db5SAlex Xie 
1068e60f8db5SAlex Xie {
106971776b6dSChristian König 	switch (flags) {
1070e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
107171776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1072e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
107371776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1074e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
107571776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
1076093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
107771776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
1078e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
107971776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
1080e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
108171776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
1082e60f8db5SAlex Xie 	default:
108371776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1084e60f8db5SAlex Xie 	}
1085e60f8db5SAlex Xie }
1086e60f8db5SAlex Xie 
10873de676d8SChristian König static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
10883de676d8SChristian König 				uint64_t *addr, uint64_t *flags)
1089f75e237cSChristian König {
1090bbc9fb10SChristian König 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
10910ca565abSOak Zeng 		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
10923de676d8SChristian König 	BUG_ON(*addr & 0xFFFF00000000003FULL);
10936a42fd6fSChristian König 
1094770d13b1SChristian König 	if (!adev->gmc.translate_further)
10956a42fd6fSChristian König 		return;
10966a42fd6fSChristian König 
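	/* With translate_further enabled, PDB1 entries carry a block fragment
	 * size, while a PDB0 entry is either a huge-page PTE (the PDE_PTE
	 * flag is dropped) or points to a further-translated page table
	 * (PTE_TF is set).
	 */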
10976a42fd6fSChristian König 	if (level == AMDGPU_VM_PDB1) {
10986a42fd6fSChristian König 		/* Set the block fragment size */
10996a42fd6fSChristian König 		if (!(*flags & AMDGPU_PDE_PTE))
11006a42fd6fSChristian König 			*flags |= AMDGPU_PDE_BFS(0x9);
11016a42fd6fSChristian König 
11026a42fd6fSChristian König 	} else if (level == AMDGPU_VM_PDB0) {
11036a42fd6fSChristian König 		if (*flags & AMDGPU_PDE_PTE)
11046a42fd6fSChristian König 			*flags &= ~AMDGPU_PDE_PTE;
11056a42fd6fSChristian König 		else
11066a42fd6fSChristian König 			*flags |= AMDGPU_PTE_TF;
11076a42fd6fSChristian König 	}
1108f75e237cSChristian König }
1109f75e237cSChristian König 
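/*
 * Note (a reading of the PDE layout above, not a statement from the
 * original code): AMDGPU_PDE_BFS(0x9) encodes the block fragment size as
 * a power of two in 4 KiB units, so 0x9 corresponds to 2^9 * 4 KiB =
 * 2 MiB fragments, matching the 2 MiB that each PDB0 entry covers when
 * translate_further is enabled.
 */
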
1110cbfae36cSChristian König static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1111cbfae36cSChristian König 				struct amdgpu_bo_va_mapping *mapping,
1112cbfae36cSChristian König 				uint64_t *flags)
1113cbfae36cSChristian König {
1114cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
1115cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1116cbfae36cSChristian König 
1117cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1118cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1119cbfae36cSChristian König 
1120cbfae36cSChristian König 	if (mapping->flags & AMDGPU_PTE_PRT) {
1121cbfae36cSChristian König 		*flags |= AMDGPU_PTE_PRT;
1122cbfae36cSChristian König 		*flags &= ~AMDGPU_PTE_VALID;
1123cbfae36cSChristian König 	}
1124cbfae36cSChristian König 
1125630e959fSAlex Deucher 	if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
1126630e959fSAlex Deucher 	     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
1127cbfae36cSChristian König 	    !(*flags & AMDGPU_PTE_SYSTEM) &&
1128cbfae36cSChristian König 	    mapping->bo_va->is_xgmi)
1129cbfae36cSChristian König 		*flags |= AMDGPU_PTE_SNOOPED;
113072b4db0fSEric Huang 
1131630e959fSAlex Deucher 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
113272b4db0fSEric Huang 		*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
1133cbfae36cSChristian König }
1134cbfae36cSChristian König 
11357b885f0eSAlex Deucher static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
11367b885f0eSAlex Deucher {
11377b885f0eSAlex Deucher 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
11387b885f0eSAlex Deucher 	unsigned size;
11397b885f0eSAlex Deucher 
1140dc5d4affSHarry Wentland 	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */
1141dc5d4affSHarry Wentland 
11427b885f0eSAlex Deucher 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
11437b885f0eSAlex Deucher 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
11447b885f0eSAlex Deucher 	} else {
11457b885f0eSAlex Deucher 		u32 viewport;
11467b885f0eSAlex Deucher 
1147630e959fSAlex Deucher 		switch (adev->ip_versions[DCE_HWIP][0]) {
1148630e959fSAlex Deucher 		case IP_VERSION(1, 0, 0):
1149630e959fSAlex Deucher 		case IP_VERSION(1, 0, 1):
11507b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
11517b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport,
11527b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
11537b885f0eSAlex Deucher 				REG_GET_FIELD(viewport,
11547b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
11557b885f0eSAlex Deucher 				4);
11567b885f0eSAlex Deucher 			break;
1157dc5d4affSHarry Wentland 		case IP_VERSION(2, 1, 0):
1158dc5d4affSHarry Wentland 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
1159dc5d4affSHarry Wentland 			size = (REG_GET_FIELD(viewport,
1160dc5d4affSHarry Wentland 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1161dc5d4affSHarry Wentland 				REG_GET_FIELD(viewport,
1162dc5d4affSHarry Wentland 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1163dc5d4affSHarry Wentland 				4);
1164dc5d4affSHarry Wentland 			break;
11657b885f0eSAlex Deucher 		default:
11667b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
11677b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
11687b885f0eSAlex Deucher 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
11697b885f0eSAlex Deucher 				4);
11707b885f0eSAlex Deucher 			break;
11717b885f0eSAlex Deucher 		}
11727b885f0eSAlex Deucher 	}
11737b885f0eSAlex Deucher 
11747b885f0eSAlex Deucher 	return size;
11757b885f0eSAlex Deucher }
11767b885f0eSAlex Deucher 
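/*
 * Worked example (the mode is assumed, for illustration only): with a
 * DCN 1.0 part and a 1920x1080 primary viewport programmed by the VBIOS,
 * the reservation computed above is 1920 * 1080 * 4 bytes = 8,294,400
 * bytes, i.e. a bit under 8 MiB of VRAM kept aside for the pre-OS
 * framebuffer.
 */
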
1177132f34e4SChristian König static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1178132f34e4SChristian König 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1179ea930000SAlex Sierra 	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
11809096d6e5SChristian König 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1181c633c00bSChristian König 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
118271776b6dSChristian König 	.map_mtype = gmc_v9_0_map_mtype,
1183cbfae36cSChristian König 	.get_vm_pde = gmc_v9_0_get_vm_pde,
11847b885f0eSAlex Deucher 	.get_vm_pte = gmc_v9_0_get_vm_pte,
11857b885f0eSAlex Deucher 	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1186e60f8db5SAlex Xie };
1187e60f8db5SAlex Xie 
1188132f34e4SChristian König static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1189e60f8db5SAlex Xie {
1190132f34e4SChristian König 	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1191e60f8db5SAlex Xie }
1192e60f8db5SAlex Xie 
11935b6b35aaSHawking Zhang static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
11945b6b35aaSHawking Zhang {
1195630e959fSAlex Deucher 	switch (adev->ip_versions[UMC_HWIP][0]) {
1196630e959fSAlex Deucher 	case IP_VERSION(6, 0, 0):
1197e7da754bSMonk Liu 		adev->umc.funcs = &umc_v6_0_funcs;
1198e7da754bSMonk Liu 		break;
1199630e959fSAlex Deucher 	case IP_VERSION(6, 1, 1):
12003aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
12013aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
12023aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
12034cf781c2SJohn Clements 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
12044cf781c2SJohn Clements 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1205efe17d5aSyipechai 		adev->umc.ras = &umc_v6_1_ras;
12064cf781c2SJohn Clements 		break;
1207630e959fSAlex Deucher 	case IP_VERSION(6, 1, 2):
12083aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
12093aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
12103aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
12114cf781c2SJohn Clements 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
12123aacf4eaSTao Zhou 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1213efe17d5aSyipechai 		adev->umc.ras = &umc_v6_1_ras;
12145b6b35aaSHawking Zhang 		break;
1215630e959fSAlex Deucher 	case IP_VERSION(6, 7, 0):
1216*498d46feSTao Zhou 		adev->umc.max_ras_err_cnt_per_query =
1217*498d46feSTao Zhou 			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
1218719e433eSMukul Joshi 		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
1219719e433eSMukul Joshi 		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
1220186c8a85SJohn Clements 		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
1221186c8a85SJohn Clements 		if (!adev->gmc.xgmi.connected_to_cpu)
1222efe17d5aSyipechai 			adev->umc.ras = &umc_v6_7_ras;
1223186c8a85SJohn Clements 		if (1 & adev->smuio.funcs->get_die_id(adev))
1224186c8a85SJohn Clements 			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
1225186c8a85SJohn Clements 		else
1226186c8a85SJohn Clements 			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
1227186c8a85SJohn Clements 		break;
12285b6b35aaSHawking Zhang 	default:
12295b6b35aaSHawking Zhang 		break;
12305b6b35aaSHawking Zhang 	}
1231efe17d5aSyipechai 
1232efe17d5aSyipechai 	if (adev->umc.ras) {
1233efe17d5aSyipechai 		amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block);
1234efe17d5aSyipechai 
1235efe17d5aSyipechai 		strcpy(adev->umc.ras->ras_block.name, "umc");
1236efe17d5aSyipechai 		adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC;
1237efe17d5aSyipechai 
1238efe17d5aSyipechai 		/* If no special ras_late_init function is defined, use the default ras_late_init */
1239efe17d5aSyipechai 		if (!adev->umc.ras->ras_block.ras_late_init)
1240efe17d5aSyipechai 			adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
1241efe17d5aSyipechai 
1242efe17d5aSyipechai 		/* If no special ras_fini function is defined, use the default ras_fini */
1243efe17d5aSyipechai 		if (!adev->umc.ras->ras_block.ras_fini)
1244efe17d5aSyipechai 			adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini;
1245efe17d5aSyipechai 	}
12465b6b35aaSHawking Zhang }
12475b6b35aaSHawking Zhang 
12483d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
12493d093da0STao Zhou {
1250630e959fSAlex Deucher 	switch (adev->ip_versions[MMHUB_HWIP][0]) {
1251630e959fSAlex Deucher 	case IP_VERSION(9, 4, 1):
1252f6c3623bSDennis Li 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
1253f6c3623bSDennis Li 		break;
1254630e959fSAlex Deucher 	case IP_VERSION(9, 4, 2):
12554da999cdSOak Zeng 		adev->mmhub.funcs = &mmhub_v1_7_funcs;
12564da999cdSOak Zeng 		break;
12573d093da0STao Zhou 	default:
12589fb1506eSOak Zeng 		adev->mmhub.funcs = &mmhub_v1_0_funcs;
12593d093da0STao Zhou 		break;
12603d093da0STao Zhou 	}
12613d093da0STao Zhou }
12623d093da0STao Zhou 
1263d844c6d7SHawking Zhang static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
1264d844c6d7SHawking Zhang {
1265630e959fSAlex Deucher 	switch (adev->ip_versions[MMHUB_HWIP][0]) {
1266630e959fSAlex Deucher 	case IP_VERSION(9, 4, 0):
12675e67bba3Syipechai 		adev->mmhub.ras = &mmhub_v1_0_ras;
1268d844c6d7SHawking Zhang 		break;
1269630e959fSAlex Deucher 	case IP_VERSION(9, 4, 1):
12705e67bba3Syipechai 		adev->mmhub.ras = &mmhub_v9_4_ras;
1271d844c6d7SHawking Zhang 		break;
1272630e959fSAlex Deucher 	case IP_VERSION(9, 4, 2):
12735e67bba3Syipechai 		adev->mmhub.ras = &mmhub_v1_7_ras;
1274d844c6d7SHawking Zhang 		break;
1275d844c6d7SHawking Zhang 	default:
1276d844c6d7SHawking Zhang 		/* mmhub ras is not available */
1277d844c6d7SHawking Zhang 		break;
1278d844c6d7SHawking Zhang 	}
12795e67bba3Syipechai 
12805e67bba3Syipechai 	if (adev->mmhub.ras) {
12815e67bba3Syipechai 		amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block);
12825e67bba3Syipechai 
12835e67bba3Syipechai 		strcpy(adev->mmhub.ras->ras_block.name, "mmhub");
12845e67bba3Syipechai 		adev->mmhub.ras->ras_block.block = AMDGPU_RAS_BLOCK__MMHUB;
12855e67bba3Syipechai 
12865e67bba3Syipechai 		/* If no special ras_late_init function is defined, use the default ras_late_init */
12875e67bba3Syipechai 		if (!adev->mmhub.ras->ras_block.ras_late_init)
12885e67bba3Syipechai 			adev->mmhub.ras->ras_block.ras_late_init = amdgpu_mmhub_ras_late_init;
12895e67bba3Syipechai 
12905e67bba3Syipechai 		/* If no special ras_fini function is defined, use the default ras_fini */
12915e67bba3Syipechai 		if (!adev->mmhub.ras->ras_block.ras_fini)
12925e67bba3Syipechai 			adev->mmhub.ras->ras_block.ras_fini = amdgpu_mmhub_ras_fini;
12935e67bba3Syipechai 	}
1294d844c6d7SHawking Zhang }
1295d844c6d7SHawking Zhang 
12968ffff9b4SOak Zeng static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
12978ffff9b4SOak Zeng {
12988ffff9b4SOak Zeng 	adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
12998ffff9b4SOak Zeng }
13008ffff9b4SOak Zeng 
13016f12507fSHawking Zhang static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
13026f12507fSHawking Zhang {
13036d76e904Syipechai 	adev->hdp.ras = &hdp_v4_0_ras;
13046d76e904Syipechai 	amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block);
13056f12507fSHawking Zhang }
13066f12507fSHawking Zhang 
13073907c492SJohn Clements static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
13083907c492SJohn Clements {
1309630e959fSAlex Deucher 	/* is UMC the right IP to check for MCA?  Maybe DF? */
1310630e959fSAlex Deucher 	switch (adev->ip_versions[UMC_HWIP][0]) {
1311630e959fSAlex Deucher 	case IP_VERSION(6, 7, 0):
13123907c492SJohn Clements 		if (!adev->gmc.xgmi.connected_to_cpu)
13133907c492SJohn Clements 			adev->mca.funcs = &mca_v3_0_funcs;
13143907c492SJohn Clements 		break;
13153907c492SJohn Clements 	default:
13163907c492SJohn Clements 		break;
13173907c492SJohn Clements 	}
13183907c492SJohn Clements }
13193907c492SJohn Clements 
1320e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
1321e60f8db5SAlex Xie {
13221f33bd18Syipechai 	int r;
1323e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1324e60f8db5SAlex Xie 
1325630e959fSAlex Deucher 	/* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
132631691b8dSRajneesh Bhardwaj 	if (adev->asic_type == CHIP_VEGA20 ||
132731691b8dSRajneesh Bhardwaj 	    adev->asic_type == CHIP_ARCTURUS)
132831691b8dSRajneesh Bhardwaj 		adev->gmc.xgmi.supported = true;
132931691b8dSRajneesh Bhardwaj 
1330630e959fSAlex Deucher 	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
133131691b8dSRajneesh Bhardwaj 		adev->gmc.xgmi.supported = true;
133231691b8dSRajneesh Bhardwaj 		adev->gmc.xgmi.connected_to_cpu =
133331691b8dSRajneesh Bhardwaj 			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
133431691b8dSRajneesh Bhardwaj 	}
133531691b8dSRajneesh Bhardwaj 
133649070c4eSHawking Zhang 	gmc_v9_0_set_gmc_funcs(adev);
133749070c4eSHawking Zhang 	gmc_v9_0_set_irq_funcs(adev);
133849070c4eSHawking Zhang 	gmc_v9_0_set_umc_funcs(adev);
133949070c4eSHawking Zhang 	gmc_v9_0_set_mmhub_funcs(adev);
1340d844c6d7SHawking Zhang 	gmc_v9_0_set_mmhub_ras_funcs(adev);
134149070c4eSHawking Zhang 	gmc_v9_0_set_gfxhub_funcs(adev);
13426f12507fSHawking Zhang 	gmc_v9_0_set_hdp_ras_funcs(adev);
13433907c492SJohn Clements 	gmc_v9_0_set_mca_funcs(adev);
134449070c4eSHawking Zhang 
1345770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1346770d13b1SChristian König 	adev->gmc.shared_aperture_end =
1347770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1348bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1349770d13b1SChristian König 	adev->gmc.private_aperture_end =
1350770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1351a7ea6548SAlex Deucher 
13521f33bd18Syipechai 	r = amdgpu_gmc_ras_early_init(adev);
13531f33bd18Syipechai 	if (r)
13541f33bd18Syipechai 		return r;
13551f33bd18Syipechai 
1356e60f8db5SAlex Xie 	return 0;
1357e60f8db5SAlex Xie }
1358e60f8db5SAlex Xie 
1359e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
1360e60f8db5SAlex Xie {
1361e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1362c5b6e585STao Zhou 	int r;
13634789c463SChristian König 
1364bdbe90f0SAlex Deucher 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1365c713a461SEvan Quan 	if (r)
1366c713a461SEvan Quan 		return r;
13674a20300bSGuchun Chen 
13684a20300bSGuchun Chen 	/*
13694a20300bSGuchun Chen 	 * Work around a performance drop issue when the VBIOS enables partial
13704a20300bSGuchun Chen 	 * writes but disables HBM ECC for Vega10.
13714a20300bSGuchun Chen 	 */
1372630e959fSAlex Deucher 	if (!amdgpu_sriov_vf(adev) &&
1373630e959fSAlex Deucher 	    (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
13748ab0d6f0SLuben Tuikov 		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1375cace4bffSHawking Zhang 			if (adev->df.funcs &&
1376cace4bffSHawking Zhang 			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
1377bdf84a80SJoseph Greathouse 				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
13784a20300bSGuchun Chen 		}
1379f49ea9f8SHawking Zhang 	}
138002bab923SDavid Panariti 
13818f6368a9SJohn Clements 	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
13825e67bba3Syipechai 		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
13835e67bba3Syipechai 		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
13845e67bba3Syipechai 			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);
1385fe5211f1SHawking Zhang 
13866d76e904Syipechai 		if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
13876d76e904Syipechai 		    adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
13886d76e904Syipechai 			adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
13898f6368a9SJohn Clements 	}
139078871b6cSHawking Zhang 
1391ba083492STao Zhou 	r = amdgpu_gmc_ras_late_init(adev);
1392791c4769Sxinhui pan 	if (r)
1393e60f8db5SAlex Xie 		return r;
1394e60f8db5SAlex Xie 
1395770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1396e60f8db5SAlex Xie }
1397e60f8db5SAlex Xie 
1398e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1399770d13b1SChristian König 					struct amdgpu_gmc *mc)
1400e60f8db5SAlex Xie {
1401adbe2e3dSZhigang Luo 	u64 base = adev->mmhub.funcs->get_fb_location(adev);
14029d4f837aSFrank.Min 
14036fdd68b1SAlex Deucher 	/* add the xgmi offset of the physical node */
14046fdd68b1SAlex Deucher 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1405f527f310SOak Zeng 	if (adev->gmc.xgmi.connected_to_cpu) {
1406f527f310SOak Zeng 		amdgpu_gmc_sysvm_location(adev, mc);
1407f527f310SOak Zeng 	} else {
140883afe835SOak Zeng 		amdgpu_gmc_vram_location(adev, mc, base);
1409961c75cfSChristian König 		amdgpu_gmc_gart_location(adev, mc);
1410c3e1b43cSChristian König 		amdgpu_gmc_agp_location(adev, mc);
1411f527f310SOak Zeng 	}
1412e60f8db5SAlex Xie 	/* base offset of vram pages */
14138ffff9b4SOak Zeng 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
14146fdd68b1SAlex Deucher 
14156fdd68b1SAlex Deucher 	/* XXX: add the xgmi offset of the physical node? */
14166fdd68b1SAlex Deucher 	adev->vm_manager.vram_base_offset +=
14176fdd68b1SAlex Deucher 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1418e60f8db5SAlex Xie }
1419e60f8db5SAlex Xie 
1420e60f8db5SAlex Xie /**
1421e60f8db5SAlex Xie  * gmc_v9_0_mc_init - initialize the memory controller driver params
1422e60f8db5SAlex Xie  *
1423e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1424e60f8db5SAlex Xie  *
1425e60f8db5SAlex Xie  * Look up the amount of vram, vram width, and decide how to place
1426e60f8db5SAlex Xie  * vram and gart within the GPU's physical address space.
1427e60f8db5SAlex Xie  * Returns 0 for success.
1428e60f8db5SAlex Xie  */
1429e60f8db5SAlex Xie static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
1430e60f8db5SAlex Xie {
1431e60f8db5SAlex Xie 	int r;
1432e60f8db5SAlex Xie 
1433e60f8db5SAlex Xie 	/* get_memsize() reports the VRAM size in MB; convert it to bytes */
1434770d13b1SChristian König 	adev->gmc.mc_vram_size =
1435bebc0762SHawking Zhang 		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
1436770d13b1SChristian König 	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
1437e60f8db5SAlex Xie 
1438be566196SOak Zeng 	if (!(adev->flags & AMD_IS_APU) &&
1439be566196SOak Zeng 	    !adev->gmc.xgmi.connected_to_cpu) {
1440e60f8db5SAlex Xie 		r = amdgpu_device_resize_fb_bar(adev);
1441e60f8db5SAlex Xie 		if (r)
1442e60f8db5SAlex Xie 			return r;
1443e60f8db5SAlex Xie 	}
1444770d13b1SChristian König 	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
1445770d13b1SChristian König 	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
1446e60f8db5SAlex Xie 
1447156a81beSChunming Zhou #ifdef CONFIG_X86_64
144831691b8dSRajneesh Bhardwaj 	/*
144931691b8dSRajneesh Bhardwaj 	 * An AMD Accelerated Processing Platform (APP) that supports a GPU-host
145031691b8dSRajneesh Bhardwaj 	 * xgmi interface can access VRAM through this path, since it appears as
145131691b8dSRajneesh Bhardwaj 	 * system-reserved memory in the host address space.
145231691b8dSRajneesh Bhardwaj 	 *
145331691b8dSRajneesh Bhardwaj 	 * For APUs, VRAM is just the stolen system memory and can be accessed
145431691b8dSRajneesh Bhardwaj 	 * directly.
145531691b8dSRajneesh Bhardwaj 	 *
145631691b8dSRajneesh Bhardwaj 	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
145731691b8dSRajneesh Bhardwaj 	 */
145831691b8dSRajneesh Bhardwaj 
145931691b8dSRajneesh Bhardwaj 	/* check whether both host-gpu and gpu-gpu xgmi links exist */
14603de60d96SHawking Zhang 	if ((adev->flags & AMD_IS_APU) ||
14613de60d96SHawking Zhang 	    (adev->gmc.xgmi.supported &&
14623de60d96SHawking Zhang 	     adev->gmc.xgmi.connected_to_cpu)) {
14633de60d96SHawking Zhang 		adev->gmc.aper_base =
14643de60d96SHawking Zhang 			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
14653de60d96SHawking Zhang 			adev->gmc.xgmi.physical_node_id *
146631691b8dSRajneesh Bhardwaj 			adev->gmc.xgmi.node_segment_size;
1467156a81beSChunming Zhou 		adev->gmc.aper_size = adev->gmc.real_vram_size;
1468156a81beSChunming Zhou 	}
146931691b8dSRajneesh Bhardwaj 
1470156a81beSChunming Zhou #endif
1471e60f8db5SAlex Xie 	/* In case the PCI BAR is larger than the actual amount of vram */
1472770d13b1SChristian König 	adev->gmc.visible_vram_size = adev->gmc.aper_size;
1473770d13b1SChristian König 	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
1474770d13b1SChristian König 		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
1475e60f8db5SAlex Xie 
1476e60f8db5SAlex Xie 	/* set the gart size */
1477e60f8db5SAlex Xie 	if (amdgpu_gart_size == -1) {
1478630e959fSAlex Deucher 		switch (adev->ip_versions[GC_HWIP][0]) {
1479630e959fSAlex Deucher 		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
1480630e959fSAlex Deucher 		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
1481630e959fSAlex Deucher 		case IP_VERSION(9, 4, 0):
1482630e959fSAlex Deucher 		case IP_VERSION(9, 4, 1):
1483630e959fSAlex Deucher 		case IP_VERSION(9, 4, 2):
1484e60f8db5SAlex Xie 		default:
1485fe19b862SMonk Liu 			adev->gmc.gart_size = 512ULL << 20;
1486e60f8db5SAlex Xie 			break;
1487630e959fSAlex Deucher 		case IP_VERSION(9, 1, 0):   /* DCE SG support */
1488630e959fSAlex Deucher 		case IP_VERSION(9, 2, 2):   /* DCE SG support */
1489630e959fSAlex Deucher 		case IP_VERSION(9, 3, 0):
1490770d13b1SChristian König 			adev->gmc.gart_size = 1024ULL << 20;
1491e60f8db5SAlex Xie 			break;
1492e60f8db5SAlex Xie 		}
1493e60f8db5SAlex Xie 	} else {
1494770d13b1SChristian König 		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
1495e60f8db5SAlex Xie 	}
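	/*
	 * Usage note (an assumption, for illustration only): amdgpu_gart_size
	 * is the "gart_size" module parameter in MiB, so booting with e.g.
	 * amdgpu.gart_size=1024 would request a 1 GiB GART here instead of the
	 * per-ASIC default selected above.
	 */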
1496e60f8db5SAlex Xie 
1497f1dc12caSOak Zeng 	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
1498f1dc12caSOak Zeng 
1499770d13b1SChristian König 	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
1500e60f8db5SAlex Xie 
1501e60f8db5SAlex Xie 	return 0;
1502e60f8db5SAlex Xie }
1503e60f8db5SAlex Xie 
1504e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1505e60f8db5SAlex Xie {
1506e60f8db5SAlex Xie 	int r;
1507e60f8db5SAlex Xie 
15081123b989SChristian König 	if (adev->gart.bo) {
1509e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
1510e60f8db5SAlex Xie 		return 0;
1511e60f8db5SAlex Xie 	}
15127b454b3aSOak Zeng 
15137b454b3aSOak Zeng 	if (adev->gmc.xgmi.connected_to_cpu) {
15147b454b3aSOak Zeng 		adev->gmc.vmid0_page_table_depth = 1;
15157b454b3aSOak Zeng 		adev->gmc.vmid0_page_table_block_size = 12;
15167b454b3aSOak Zeng 	} else {
15177b454b3aSOak Zeng 		adev->gmc.vmid0_page_table_depth = 0;
15187b454b3aSOak Zeng 		adev->gmc.vmid0_page_table_block_size = 0;
15197b454b3aSOak Zeng 	}
15207b454b3aSOak Zeng 
1521e60f8db5SAlex Xie 	/* Initialize common gart structure */
1522e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
1523e60f8db5SAlex Xie 	if (r)
1524e60f8db5SAlex Xie 		return r;
1525e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
15267596ab68SHawking Zhang 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1527e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
1528522510a6SOak Zeng 
1529522510a6SOak Zeng 	r = amdgpu_gart_table_vram_alloc(adev);
1530522510a6SOak Zeng 	if (r)
1531522510a6SOak Zeng 		return r;
1532522510a6SOak Zeng 
1533522510a6SOak Zeng 	if (adev->gmc.xgmi.connected_to_cpu) {
1534522510a6SOak Zeng 		r = amdgpu_gmc_pdb0_alloc(adev);
1535522510a6SOak Zeng 	}
1536522510a6SOak Zeng 
1537522510a6SOak Zeng 	return r;
1538e60f8db5SAlex Xie }
1539e60f8db5SAlex Xie 
1540b0a2db9bSAlex Deucher /**
1541b0a2db9bSAlex Deucher  * gmc_v9_0_save_registers - saves regs
1542b0a2db9bSAlex Deucher  *
1543b0a2db9bSAlex Deucher  * @adev: amdgpu_device pointer
1544b0a2db9bSAlex Deucher  *
1545b0a2db9bSAlex Deucher  * This saves register values that may need to be
1546b0a2db9bSAlex Deucher  * restored upon resume.
1547b0a2db9bSAlex Deucher  */
1548b0a2db9bSAlex Deucher static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1549ebdef28eSAlex Deucher {
1550630e959fSAlex Deucher 	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1551630e959fSAlex Deucher 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
1552b0a2db9bSAlex Deucher 		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1553ebdef28eSAlex Deucher }
1554ebdef28eSAlex Deucher 
1555e60f8db5SAlex Xie static int gmc_v9_0_sw_init(void *handle)
1556e60f8db5SAlex Xie {
1557ad02e08eSOri Messinger 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
1558e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1559e60f8db5SAlex Xie 
15608ffff9b4SOak Zeng 	adev->gfxhub.funcs->init(adev);
15619fb1506eSOak Zeng 
15629fb1506eSOak Zeng 	adev->mmhub.funcs->init(adev);
15633907c492SJohn Clements 	if (adev->mca.funcs)
15643907c492SJohn Clements 		adev->mca.funcs->init(adev);
1565e60f8db5SAlex Xie 
1566770d13b1SChristian König 	spin_lock_init(&adev->gmc.invalidate_lock);
1567e60f8db5SAlex Xie 
1568ad02e08eSOri Messinger 	r = amdgpu_atomfirmware_get_vram_info(adev,
1569ad02e08eSOri Messinger 		&vram_width, &vram_type, &vram_vendor);
1570631cdbd2SAlex Deucher 	if (amdgpu_sriov_vf(adev))
1571631cdbd2SAlex Deucher 		/* For Vega10 SR-IOV, vram_width can't be read from ATOM, as on RAVEN,
1572631cdbd2SAlex Deucher 		 * and the DF-related registers are not readable; hardcoding seems to be
1573631cdbd2SAlex Deucher 		 * the only way to set the correct vram_width.
1574631cdbd2SAlex Deucher 		 */
1575631cdbd2SAlex Deucher 		adev->gmc.vram_width = 2048;
1576631cdbd2SAlex Deucher 	else if (amdgpu_emu_mode != 1)
1577631cdbd2SAlex Deucher 		adev->gmc.vram_width = vram_width;
1578631cdbd2SAlex Deucher 
1579631cdbd2SAlex Deucher 	if (!adev->gmc.vram_width) {
1580631cdbd2SAlex Deucher 		int chansize, numchan;
1581631cdbd2SAlex Deucher 
1582631cdbd2SAlex Deucher 		/* hbm memory channel size */
1583631cdbd2SAlex Deucher 		if (adev->flags & AMD_IS_APU)
1584631cdbd2SAlex Deucher 			chansize = 64;
1585631cdbd2SAlex Deucher 		else
1586631cdbd2SAlex Deucher 			chansize = 128;
1587cace4bffSHawking Zhang 		if (adev->df.funcs &&
1588cace4bffSHawking Zhang 		    adev->df.funcs->get_hbm_channel_number) {
1589bdf84a80SJoseph Greathouse 			numchan = adev->df.funcs->get_hbm_channel_number(adev);
1590631cdbd2SAlex Deucher 			adev->gmc.vram_width = numchan * chansize;
1591631cdbd2SAlex Deucher 		}
1592cace4bffSHawking Zhang 	}
1593631cdbd2SAlex Deucher 
1594631cdbd2SAlex Deucher 	adev->gmc.vram_type = vram_type;
1595ad02e08eSOri Messinger 	adev->gmc.vram_vendor = vram_vendor;
1596630e959fSAlex Deucher 	switch (adev->ip_versions[GC_HWIP][0]) {
1597630e959fSAlex Deucher 	case IP_VERSION(9, 1, 0):
1598630e959fSAlex Deucher 	case IP_VERSION(9, 2, 2):
15991daa2bfaSLe Ma 		adev->num_vmhubs = 2;
16001daa2bfaSLe Ma 
16016a42fd6fSChristian König 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1602f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
16036a42fd6fSChristian König 		} else {
16046a42fd6fSChristian König 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
16056a42fd6fSChristian König 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1606770d13b1SChristian König 			adev->gmc.translate_further =
16076a42fd6fSChristian König 				adev->vm_manager.num_level > 1;
16086a42fd6fSChristian König 		}
1609e60f8db5SAlex Xie 		break;
1610630e959fSAlex Deucher 	case IP_VERSION(9, 0, 1):
1611630e959fSAlex Deucher 	case IP_VERSION(9, 2, 1):
1612630e959fSAlex Deucher 	case IP_VERSION(9, 4, 0):
1613630e959fSAlex Deucher 	case IP_VERSION(9, 3, 0):
1614630e959fSAlex Deucher 	case IP_VERSION(9, 4, 2):
16151daa2bfaSLe Ma 		adev->num_vmhubs = 2;
16161daa2bfaSLe Ma 
1618e60f8db5SAlex Xie 		/*
1619e60f8db5SAlex Xie 		 * To fulfill 4-level page support, the vm size is
1620e60f8db5SAlex Xie 		 * 256TB (48 bit), the maximum size of Vega10,
1621e60f8db5SAlex Xie 		 * with a block size of 512 (9 bit).
1622e60f8db5SAlex Xie 		 */
1623cdba61daSwentalou 		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
1624cdba61daSwentalou 		if (amdgpu_sriov_vf(adev))
1625cdba61daSwentalou 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1626cdba61daSwentalou 		else
1627f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1628e60f8db5SAlex Xie 		break;
1629630e959fSAlex Deucher 	case IP_VERSION(9, 4, 1):
1630c8a6e2a3SLe Ma 		adev->num_vmhubs = 3;
1631c8a6e2a3SLe Ma 
16323de2ff5dSLe Ma 		/* Keep the vm size same with Vega20 */
16333de2ff5dSLe Ma 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
16343de2ff5dSLe Ma 		break;
1635e60f8db5SAlex Xie 	default:
1636e60f8db5SAlex Xie 		break;
1637e60f8db5SAlex Xie 	}
1638e60f8db5SAlex Xie 
1639e60f8db5SAlex Xie 	/* This interrupt is the VMC page fault. */
164044a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1641770d13b1SChristian König 				&adev->gmc.vm_fault);
164230da7bb1SChristian König 	if (r)
164330da7bb1SChristian König 		return r;
164430da7bb1SChristian König 
1645630e959fSAlex Deucher 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
16467d19b15fSLe Ma 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
16477d19b15fSLe Ma 					&adev->gmc.vm_fault);
16487d19b15fSLe Ma 		if (r)
16497d19b15fSLe Ma 			return r;
16507d19b15fSLe Ma 	}
16517d19b15fSLe Ma 
165244a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1653770d13b1SChristian König 				&adev->gmc.vm_fault);
1654e60f8db5SAlex Xie 
1655e60f8db5SAlex Xie 	if (r)
1656e60f8db5SAlex Xie 		return r;
1657e60f8db5SAlex Xie 
165868d705ddSHawking Zhang 	if (!amdgpu_sriov_vf(adev) &&
165968d705ddSHawking Zhang 	    !adev->gmc.xgmi.connected_to_cpu) {
1660791c4769Sxinhui pan 		/* interrupt sent to DF. */
1661791c4769Sxinhui pan 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1662791c4769Sxinhui pan 				      &adev->gmc.ecc_irq);
1663791c4769Sxinhui pan 		if (r)
1664791c4769Sxinhui pan 			return r;
16652ee9403eSZhigang Luo 	}
1666791c4769Sxinhui pan 
1667e60f8db5SAlex Xie 	/* Set the internal MC address mask
1668e60f8db5SAlex Xie 	 * This is the max address of the GPU's
1669e60f8db5SAlex Xie 	 * internal address space.
1670e60f8db5SAlex Xie 	 */
1671770d13b1SChristian König 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
1672e60f8db5SAlex Xie 
1673244511f3SChristoph Hellwig 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
1674e60f8db5SAlex Xie 	if (r) {
1675e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1676244511f3SChristoph Hellwig 		return r;
1677e60f8db5SAlex Xie 	}
1678244511f3SChristoph Hellwig 	adev->need_swiotlb = drm_need_swiotlb(44);
1679e60f8db5SAlex Xie 
1680e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
1681e60f8db5SAlex Xie 	if (r)
1682e60f8db5SAlex Xie 		return r;
1683e60f8db5SAlex Xie 
16847b885f0eSAlex Deucher 	amdgpu_gmc_get_vbios_allocations(adev);
1685ebdef28eSAlex Deucher 
1686e60f8db5SAlex Xie 	/* Memory manager */
1687e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
1688e60f8db5SAlex Xie 	if (r)
1689e60f8db5SAlex Xie 		return r;
1690e60f8db5SAlex Xie 
1691e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
1692e60f8db5SAlex Xie 	if (r)
1693e60f8db5SAlex Xie 		return r;
1694e60f8db5SAlex Xie 
169505ec3edaSChristian König 	/*
169605ec3edaSChristian König 	 * number of VMs
169705ec3edaSChristian König 	 * VMID 0 is reserved for System
169881659b20SFelix Kuehling 	 * amdgpu graphics/compute will use VMIDs 1..n-1
169981659b20SFelix Kuehling 	 * amdkfd will use VMIDs n..15
170081659b20SFelix Kuehling 	 *
170181659b20SFelix Kuehling 	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
170281659b20SFelix Kuehling 	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
170381659b20SFelix Kuehling 	 * for video processing.
170405ec3edaSChristian König 	 */
170581659b20SFelix Kuehling 	adev->vm_manager.first_kfd_vmid =
1706630e959fSAlex Deucher 		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
1707630e959fSAlex Deucher 		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;
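	/*
	 * Worked example (derived from the comment and expression above, for
	 * illustration only): on a graphics-capable part such as Vega10,
	 * first_kfd_vmid is 8, so VMIDs 1-7 serve graphics/compute rings and
	 * VMIDs 8-15 go to amdkfd; on the compute-only GC 9.4.1/9.4.2 parts it
	 * is 3, leaving VMIDs 1-2 for video processing and 3-15 for amdkfd.
	 */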
170805ec3edaSChristian König 
170905ec3edaSChristian König 	amdgpu_vm_manager_init(adev);
171005ec3edaSChristian König 
1711b0a2db9bSAlex Deucher 	gmc_v9_0_save_registers(adev);
1712b0a2db9bSAlex Deucher 
171305ec3edaSChristian König 	return 0;
1714e60f8db5SAlex Xie }
1715e60f8db5SAlex Xie 
1716e60f8db5SAlex Xie static int gmc_v9_0_sw_fini(void *handle)
1717e60f8db5SAlex Xie {
1718e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1719e60f8db5SAlex Xie 
17202adf1344STao Zhou 	amdgpu_gmc_ras_fini(adev);
1721f59548c8SMonk Liu 	amdgpu_gem_force_release(adev);
1722e60f8db5SAlex Xie 	amdgpu_vm_manager_fini(adev);
1723a3d9103eSAndrey Grodzovsky 	amdgpu_gart_table_vram_free(adev);
172437c49dedSLijo Lazar 	amdgpu_bo_unref(&adev->gmc.pdb0_bo);
1725e60f8db5SAlex Xie 	amdgpu_bo_fini(adev);
1726e60f8db5SAlex Xie 
1727e60f8db5SAlex Xie 	return 0;
1728e60f8db5SAlex Xie }
1729e60f8db5SAlex Xie 
1730e60f8db5SAlex Xie static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
1731e60f8db5SAlex Xie {
1732946a4d5bSShaoyun Liu 
1733630e959fSAlex Deucher 	switch (adev->ip_versions[MMHUB_HWIP][0]) {
1734630e959fSAlex Deucher 	case IP_VERSION(9, 0, 0):
17354cd4c5c0SMonk Liu 		if (amdgpu_sriov_vf(adev))
173698cad2deSTrigger Huang 			break;
1737df561f66SGustavo A. R. Silva 		fallthrough;
1738630e959fSAlex Deucher 	case IP_VERSION(9, 4, 0):
1739946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
17405c583018SEvan Quan 						golden_settings_mmhub_1_0_0,
1741c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
1742946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
17435c583018SEvan Quan 						golden_settings_athub_1_0_0,
1744c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
1745e60f8db5SAlex Xie 		break;
1746630e959fSAlex Deucher 	case IP_VERSION(9, 1, 0):
1747630e959fSAlex Deucher 	case IP_VERSION(9, 2, 0):
17488787ee01SHuang Rui 		/* TODO for renoir */
1749946a4d5bSShaoyun Liu 		soc15_program_register_sequence(adev,
17505c583018SEvan Quan 						golden_settings_athub_1_0_0,
1751c47b41a7SChristian König 						ARRAY_SIZE(golden_settings_athub_1_0_0));
1752e4f3abaaSChunming Zhou 		break;
1753e60f8db5SAlex Xie 	default:
1754e60f8db5SAlex Xie 		break;
1755e60f8db5SAlex Xie 	}
1756e60f8db5SAlex Xie }
1757e60f8db5SAlex Xie 
1758e60f8db5SAlex Xie /**
1759c2ecd79bSShirish S  * gmc_v9_0_restore_registers - restores regs
1760c2ecd79bSShirish S  *
1761c2ecd79bSShirish S  * @adev: amdgpu_device pointer
1762c2ecd79bSShirish S  *
1763c2ecd79bSShirish S  * This restores register values, saved at suspend.
1764c2ecd79bSShirish S  */
1765b0a2db9bSAlex Deucher void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
1766c2ecd79bSShirish S {
1767630e959fSAlex Deucher 	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1768630e959fSAlex Deucher 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
1769f8646661SAlex Deucher 		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
17700eaa8012SShirish S 		WARN_ON(adev->gmc.sdpif_register !=
17710eaa8012SShirish S 			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
17720eaa8012SShirish S 	}
1773c2ecd79bSShirish S }
1774c2ecd79bSShirish S 
1775c2ecd79bSShirish S /**
1776e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1777e60f8db5SAlex Xie  *
1778e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1779e60f8db5SAlex Xie  */
1780e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1781e60f8db5SAlex Xie {
1782cb1545f7SOak Zeng 	int r;
1783e60f8db5SAlex Xie 
1784522510a6SOak Zeng 	if (adev->gmc.xgmi.connected_to_cpu)
1785522510a6SOak Zeng 		amdgpu_gmc_init_pdb0(adev);
1786522510a6SOak Zeng 
17871123b989SChristian König 	if (adev->gart.bo == NULL) {
1788e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1789e60f8db5SAlex Xie 		return -EINVAL;
1790e60f8db5SAlex Xie 	}
1791522510a6SOak Zeng 
17921b08dfb8SChristian König 	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
17938ffff9b4SOak Zeng 	r = adev->gfxhub.funcs->gart_enable(adev);
1794e60f8db5SAlex Xie 	if (r)
1795e60f8db5SAlex Xie 		return r;
1796e60f8db5SAlex Xie 
17979fb1506eSOak Zeng 	r = adev->mmhub.funcs->gart_enable(adev);
1798e60f8db5SAlex Xie 	if (r)
1799e60f8db5SAlex Xie 		return r;
1800e60f8db5SAlex Xie 
1801522510a6SOak Zeng 	DRM_INFO("PCIE GART of %uM enabled.\n",
1802522510a6SOak Zeng 		 (unsigned)(adev->gmc.gart_size >> 20));
1803522510a6SOak Zeng 	if (adev->gmc.pdb0_bo)
1804522510a6SOak Zeng 		DRM_INFO("PDB0 located at 0x%016llX\n",
1805522510a6SOak Zeng 				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
1806522510a6SOak Zeng 	DRM_INFO("PTB located at 0x%016llX\n",
1807cb1545f7SOak Zeng 			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1808522510a6SOak Zeng 
1809cb1545f7SOak Zeng 	return 0;
1810cb1545f7SOak Zeng }
1811cb1545f7SOak Zeng 
1812cb1545f7SOak Zeng static int gmc_v9_0_hw_init(void *handle)
1813cb1545f7SOak Zeng {
1814cb1545f7SOak Zeng 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1815cb1545f7SOak Zeng 	bool value;
1816479e3b02SXiaojian Du 	int i, r;
1817cb1545f7SOak Zeng 
1818cb1545f7SOak Zeng 	/* The sequence of these two function calls matters. */
1819cb1545f7SOak Zeng 	gmc_v9_0_init_golden_registers(adev);
1820cb1545f7SOak Zeng 
1821cb1545f7SOak Zeng 	if (adev->mode_info.num_crtc) {
1822cb1545f7SOak Zeng 		/* Lockout access through VGA aperture*/
1823cb1545f7SOak Zeng 		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
1824cb1545f7SOak Zeng 		/* disable VGA render */
1825cb1545f7SOak Zeng 		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
1826cb1545f7SOak Zeng 	}
1827cb1545f7SOak Zeng 
18289fb1506eSOak Zeng 	if (adev->mmhub.funcs->update_power_gating)
18299fb1506eSOak Zeng 		adev->mmhub.funcs->update_power_gating(adev, true);
18309fb1506eSOak Zeng 
1831455d40c9SLikun Gao 	adev->hdp.funcs->init_registers(adev);
1832fe2b5323STiecheng Zhou 
18331d4e0a8cSMonk Liu 	/* After HDP is initialized, flush HDP. */
1834455d40c9SLikun Gao 	adev->hdp.funcs->flush_hdp(adev, NULL);
18351d4e0a8cSMonk Liu 
1836e60f8db5SAlex Xie 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
1837e60f8db5SAlex Xie 		value = false;
1838e60f8db5SAlex Xie 	else
1839e60f8db5SAlex Xie 		value = true;
1840e60f8db5SAlex Xie 
184120bf2f6fSZhigang Luo 	if (!amdgpu_sriov_vf(adev)) {
18428ffff9b4SOak Zeng 		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
18439fb1506eSOak Zeng 		adev->mmhub.funcs->set_fault_enable_default(adev, value);
184420bf2f6fSZhigang Luo 	}
18453ff98548SOak Zeng 	for (i = 0; i < adev->num_vmhubs; ++i)
18463ff98548SOak Zeng 		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
1847e60f8db5SAlex Xie 
1848e7da754bSMonk Liu 	if (adev->umc.funcs && adev->umc.funcs->init_registers)
1849e7da754bSMonk Liu 		adev->umc.funcs->init_registers(adev);
1850e7da754bSMonk Liu 
1851479e3b02SXiaojian Du 	r = gmc_v9_0_gart_enable(adev);
1852479e3b02SXiaojian Du 	if (r)
1853479e3b02SXiaojian Du 		return r;
1854479e3b02SXiaojian Du 
1855479e3b02SXiaojian Du 	if (amdgpu_emu_mode == 1)
1856479e3b02SXiaojian Du 		return amdgpu_gmc_vram_checking(adev);
1857479e3b02SXiaojian Du 	else
1858479e3b02SXiaojian Du 		return r;
1859e60f8db5SAlex Xie }
1860e60f8db5SAlex Xie 
1861e60f8db5SAlex Xie /**
1862e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
1863e60f8db5SAlex Xie  *
1864e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1865e60f8db5SAlex Xie  *
1866e60f8db5SAlex Xie  * This disables all VM page table.
1867e60f8db5SAlex Xie  */
1868e60f8db5SAlex Xie static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
1869e60f8db5SAlex Xie {
18708ffff9b4SOak Zeng 	adev->gfxhub.funcs->gart_disable(adev);
18719fb1506eSOak Zeng 	adev->mmhub.funcs->gart_disable(adev);
1872e60f8db5SAlex Xie }
1873e60f8db5SAlex Xie 
1874e60f8db5SAlex Xie static int gmc_v9_0_hw_fini(void *handle)
1875e60f8db5SAlex Xie {
1876e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1877e60f8db5SAlex Xie 
187871cf9e72SLeslie Shi 	gmc_v9_0_gart_disable(adev);
187971cf9e72SLeslie Shi 
18805dd696aeSTrigger Huang 	if (amdgpu_sriov_vf(adev)) {
18815dd696aeSTrigger Huang 		/* full access mode, so don't touch any GMC register */
18825dd696aeSTrigger Huang 		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
18835dd696aeSTrigger Huang 		return 0;
18845dd696aeSTrigger Huang 	}
18855dd696aeSTrigger Huang 
188617252701SEvan Quan 	/*
188717252701SEvan Quan 	 * Pair up the operations done in gmc_v9_0_hw_init so that GMC keeps a
188817252701SEvan Quan 	 * correct cached state. Otherwise, the "gate" operation on S3 resume
188917252701SEvan Quan 	 * will fail because of the stale cached state.
189017252701SEvan Quan 	 */
189117252701SEvan Quan 	if (adev->mmhub.funcs->update_power_gating)
189217252701SEvan Quan 		adev->mmhub.funcs->update_power_gating(adev, false);
189317252701SEvan Quan 
1894791c4769Sxinhui pan 	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
1895770d13b1SChristian König 	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
1896e60f8db5SAlex Xie 
1897e60f8db5SAlex Xie 	return 0;
1898e60f8db5SAlex Xie }
1899e60f8db5SAlex Xie 
1900e60f8db5SAlex Xie static int gmc_v9_0_suspend(void *handle)
1901e60f8db5SAlex Xie {
1902e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1903e60f8db5SAlex Xie 
1904c24a3c05SLiu Shixin 	return gmc_v9_0_hw_fini(adev);
1905e60f8db5SAlex Xie }
1906e60f8db5SAlex Xie 
1907e60f8db5SAlex Xie static int gmc_v9_0_resume(void *handle)
1908e60f8db5SAlex Xie {
1909e60f8db5SAlex Xie 	int r;
1910e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1911e60f8db5SAlex Xie 
1912e60f8db5SAlex Xie 	r = gmc_v9_0_hw_init(adev);
1913e60f8db5SAlex Xie 	if (r)
1914e60f8db5SAlex Xie 		return r;
1915e60f8db5SAlex Xie 
1916620f774fSChristian König 	amdgpu_vmid_reset_all(adev);
1917e60f8db5SAlex Xie 
191832601d48SChristian König 	return 0;
1919e60f8db5SAlex Xie }
1920e60f8db5SAlex Xie 
1921e60f8db5SAlex Xie static bool gmc_v9_0_is_idle(void *handle)
1922e60f8db5SAlex Xie {
1923e60f8db5SAlex Xie 	/* MC is always ready in GMC v9.*/
1924e60f8db5SAlex Xie 	return true;
1925e60f8db5SAlex Xie }
1926e60f8db5SAlex Xie 
1927e60f8db5SAlex Xie static int gmc_v9_0_wait_for_idle(void *handle)
1928e60f8db5SAlex Xie {
1929e60f8db5SAlex Xie 	/* There is no need to wait for MC idle in GMC v9.*/
1930e60f8db5SAlex Xie 	return 0;
1931e60f8db5SAlex Xie }
1932e60f8db5SAlex Xie 
1933e60f8db5SAlex Xie static int gmc_v9_0_soft_reset(void *handle)
1934e60f8db5SAlex Xie {
1935e60f8db5SAlex Xie 	/* XXX for emulation.*/
1936e60f8db5SAlex Xie 	return 0;
1937e60f8db5SAlex Xie }
1938e60f8db5SAlex Xie 
1939e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
1940e60f8db5SAlex Xie 					enum amd_clockgating_state state)
1941e60f8db5SAlex Xie {
1942d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1943d5583d4fSHuang Rui 
19449fb1506eSOak Zeng 	adev->mmhub.funcs->set_clockgating(adev, state);
1945bee7b51aSLe Ma 
1946bee7b51aSLe Ma 	athub_v1_0_set_clockgating(adev, state);
1947bee7b51aSLe Ma 
1948bee7b51aSLe Ma 	return 0;
1949e60f8db5SAlex Xie }
1950e60f8db5SAlex Xie 
195113052be5SHuang Rui static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
195213052be5SHuang Rui {
195313052be5SHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
195413052be5SHuang Rui 
19559fb1506eSOak Zeng 	adev->mmhub.funcs->get_clockgating(adev, flags);
1956bee7b51aSLe Ma 
1957bee7b51aSLe Ma 	athub_v1_0_get_clockgating(adev, flags);
195813052be5SHuang Rui }
195913052be5SHuang Rui 
1960e60f8db5SAlex Xie static int gmc_v9_0_set_powergating_state(void *handle,
1961e60f8db5SAlex Xie 					enum amd_powergating_state state)
1962e60f8db5SAlex Xie {
1963e60f8db5SAlex Xie 	return 0;
1964e60f8db5SAlex Xie }
1965e60f8db5SAlex Xie 
1966e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
1967e60f8db5SAlex Xie 	.name = "gmc_v9_0",
1968e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
1969e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
1970e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
1971e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
1972e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
1973e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
1974e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
1975e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
1976e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
1977e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
1978e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
1979e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
1980e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
198113052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
1982e60f8db5SAlex Xie };
1983e60f8db5SAlex Xie 
1984e60f8db5SAlex Xie const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
1985e60f8db5SAlex Xie {
1986e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
1987e60f8db5SAlex Xie 	.major = 9,
1988e60f8db5SAlex Xie 	.minor = 0,
1989e60f8db5SAlex Xie 	.rev = 0,
1990e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
1991e60f8db5SAlex Xie };
1992