xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 7110e988)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
34cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
35135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
36135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
37fb960bd2SFeifei Xu #include "vega10_enum.h"
3865417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
39ea930000SAlex Sierra #include "athub/athub_1_0_sh_mask.h"
406ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
41250b4228SChristian König #include "oss/osssys_4_0_offset.h"
42e60f8db5SAlex Xie 
43946a4d5bSShaoyun Liu #include "soc15.h"
44ea930000SAlex Sierra #include "soc15d.h"
45e60f8db5SAlex Xie #include "soc15_common.h"
4690c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
47e60f8db5SAlex Xie 
48e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
49e60f8db5SAlex Xie #include "mmhub_v1_0.h"
50bee7b51aSLe Ma #include "athub_v1_0.h"
51bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
52018f7300SLe Ma #include "gfxhub_v1_2.h"
5351cce480SLe Ma #include "mmhub_v9_4.h"
5485e39550SLe Ma #include "mmhub_v1_7.h"
55018f7300SLe Ma #include "mmhub_v1_8.h"
565b6b35aaSHawking Zhang #include "umc_v6_1.h"
57e7da754bSMonk Liu #include "umc_v6_0.h"
58186c8a85SJohn Clements #include "umc_v6_7.h"
596f12507fSHawking Zhang #include "hdp_v4_0.h"
603907c492SJohn Clements #include "mca_v3_0.h"
61e60f8db5SAlex Xie 
6244a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
6344a99b65SAndrey Grodzovsky 
64791c4769Sxinhui pan #include "amdgpu_ras.h"
65029fbd43SHawking Zhang #include "amdgpu_xgmi.h"
66791c4769Sxinhui pan 
67d0fb18b5SAndrey Grodzovsky #include "amdgpu_reset.h"
68d0fb18b5SAndrey Grodzovsky 
69ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
70ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
71ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
72ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
73ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
74ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
75ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
76f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
77f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2
78f8646661SAlex Deucher 
79dc5d4affSHarry Wentland #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2                                                          0x05ea
80dc5d4affSHarry Wentland #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX                                                 2
81dc5d4affSHarry Wentland 
82a433f1f5SLijo Lazar #define MAX_MEM_RANGES 8
83ebdef28eSAlex Deucher 
/*
 * GFXHUB fault client-ID -> name table, indexed positionally by client ID.
 * Used when decoding GFXHUB page-fault status (client 12 == "PA").
 */
static const char * const gfxhub_client_ids[] = {
	[0]  = "CB",
	[1]  = "DB",
	[2]  = "IA",
	[3]  = "WD",
	[4]  = "CPF",
	[5]  = "CPC",
	[6]  = "CPG",
	[7]  = "RLC",
	[8]  = "TCP",
	[9]  = "SQC (inst)",
	[10] = "SQC (data)",
	[11] = "SQG",
	[12] = "PA",
};
99be99ecbfSAlex Deucher 
/*
 * MMHUB fault client-ID -> name table for Raven.
 * First index is the client ID; second index presumably selects
 * read (0) vs write (1) access — confirm at the lookup site.
 * Unlisted IDs decode to NULL (zero-initialized gaps).
 */
static const char *mmhub_client_ids_raven[][2] = {
	[0][0]  = "MP1",	[0][1]  = "MP1",
	[1][0]  = "MP0",	[1][1]  = "MP0",
	[2][0]  = "VCN",	[2][1]  = "VCN",
	[3][0]  = "VCNU",	[3][1]  = "VCNU",
	[4][0]  = "HDP",	[4][1]  = "HDP",
	[5][0]  = "DCE",	[5][1]  = "XDP",
	[6][1]  = "DBGU0",
	[7][1]  = "DCE",
	[8][1]  = "DCEDWB0",
	[9][1]  = "DCEDWB1",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",	[26][1] = "OSS",
	[27][0] = "SDMA0",	[27][1] = "SDMA0",
};
12402f23f5fSAlex Deucher 
/*
 * MMHUB fault client-ID -> name table for Renoir.
 * Layout as mmhub_client_ids_raven: [client_id][0/1], gaps are NULL.
 */
static const char *mmhub_client_ids_renoir[][2] = {
	[0][0]  = "MP1",	[0][1]  = "MP1",
	[1][0]  = "MP0",	[1][1]  = "MP0",
	[2][0]  = "HDP",	[2][1]  = "HDP",
	[3][1]  = "XDP",
	[4][0]  = "DCEDMC",
	[5][0]  = "DCEVGA",
	[6][1]  = "DBGU0",
	[7][1]  = "DCEDMC",
	[8][1]  = "DCEVGA",
	[9][1]  = "DCEDWB",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",	[26][1] = "OSS",
	[27][0] = "SDMA0",	[27][1] = "SDMA0",
	[28][0] = "VCN",	[28][1] = "VCN",
	[29][0] = "VCNU",	[29][1] = "VCNU",
	[30][0] = "JPEG",	[30][1] = "JPEG",
};
15202f23f5fSAlex Deucher 
/*
 * MMHUB fault client-ID -> name table for Vega10.
 * Layout as mmhub_client_ids_raven: [client_id][0/1], gaps are NULL.
 */
static const char *mmhub_client_ids_vega10[][2] = {
	[0][0]  = "MP0",	[0][1]  = "MP0",
	[1][0]  = "UVD",	[1][1]  = "UVD",
	[2][0]  = "UVDU",	[2][1]  = "UVDU",
	[3][0]  = "HDP",	[3][1]  = "DBGU0",
	[4][1]  = "HDP",
	[5][1]  = "XDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",	[14][1] = "OSS",
	[15][0] = "SDMA1",	[15][1] = "SDMA0",
	[32+0][0]  = "VCE0",	[32+0][1]  = "VCE0",
	[32+1][0]  = "VCE0U",	[32+1][1]  = "VCE0U",
	[32+2][0]  = "XDMA",	[32+2][1]  = "XDMA",
	[32+3][0]  = "DCE",	[32+3][1]  = "DCE",
	[32+4][0]  = "MP1",	[32+4][1]  = "DCEDWB",
	[32+5][1]  = "MP1",
	[32+6][1]  = "DBGU1",
	[32+14][0] = "SDMA0",	[32+14][1] = "SDMA1",
};
18402f23f5fSAlex Deucher 
/*
 * MMHUB fault client-ID -> name table for Vega12.
 * Layout as mmhub_client_ids_raven: [client_id][0/1], gaps are NULL.
 */
static const char *mmhub_client_ids_vega12[][2] = {
	[0][0]  = "MP0",	[0][1]  = "MP0",
	[1][0]  = "VCE0",	[1][1]  = "VCE0",
	[2][0]  = "VCE0U",	[2][1]  = "VCE0U",
	[3][0]  = "HDP",	[3][1]  = "DBGU0",
	[4][1]  = "HDP",
	[5][1]  = "XDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",	[14][1] = "OSS",
	[15][0] = "SDMA1",	[15][1] = "SDMA0",
	[32+0][0]  = "DCE",	[32+0][1]  = "DCE",
	[32+1][0]  = "XDMA",	[32+1][1]  = "DCEDWB",
	[32+2][0]  = "UVD",	[32+2][1]  = "XDMA",
	[32+3][0]  = "UVDU",	[32+3][1]  = "UVD",
	[32+4][0]  = "MP1",	[32+4][1]  = "UVDU",
	[32+5][1]  = "MP1",
	[32+6][1]  = "DBGU1",
	[32+15][0] = "SDMA0",	[32+15][1] = "SDMA1",
};
21602f23f5fSAlex Deucher 
/*
 * MMHUB fault client-ID -> name table for Vega20.
 * Layout as mmhub_client_ids_raven: [client_id][0/1], gaps are NULL.
 */
static const char *mmhub_client_ids_vega20[][2] = {
	[0][0]  = "XDMA",	[0][1]  = "XDMA",
	[1][0]  = "DCE",	[1][1]  = "DCE",
	[2][0]  = "VCE0",	[2][1]  = "DCEDWB",
	[3][0]  = "VCE0U",	[3][1]  = "VCE0",
	[4][0]  = "UVD",	[4][1]  = "VCE0U",
	[5][0]  = "UVD1U",	[5][1]  = "UVD1",
	[6][1]  = "UVD1U",
	[7][1]  = "DBGU0",
	[8][1]  = "XDP",
	[13][0] = "OSS",	[13][1] = "OSS",
	[14][0] = "HDP",	[14][1] = "HDP",
	[15][0] = "SDMA0",	[15][1] = "SDMA0",
	[32+0][0]  = "UVD",	[32+0][1]  = "UVD",
	[32+1][0]  = "UVDU",	[32+1][1]  = "UVDU",
	[32+2][0]  = "MP1",	[32+2][1]  = "DBGU1",
	[32+3][0]  = "MP0",	[32+3][1]  = "MP1",
	[32+4][1]  = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",	[32+14][1] = "SDMA1",
};
25202f23f5fSAlex Deucher 
/*
 * MMHUB fault client-ID -> name table for Arcturus.
 * Both columns are identical for every populated client ID.
 * Layout as mmhub_client_ids_raven: [client_id][0/1], gaps are NULL.
 */
static const char *mmhub_client_ids_arcturus[][2] = {
	[0][0]   = "DBGU1",	[0][1]   = "DBGU1",
	[1][0]   = "XDP",	[1][1]   = "XDP",
	[2][0]   = "MP1",	[2][1]   = "MP1",
	[14][0]  = "HDP",	[14][1]  = "HDP",
	[171][0] = "JPEG",	[171][1] = "JPEG",
	[172][0] = "VCN",	[172][1] = "VCN",
	[173][0] = "VCNU",	[173][1] = "VCNU",
	[203][0] = "JPEG1",	[203][1] = "JPEG1",
	[204][0] = "VCN1",	[204][1] = "VCN1",
	[205][0] = "VCN1U",	[205][1] = "VCN1U",
	[256][0] = "SDMA0",	[256][1] = "SDMA0",
	[257][0] = "SDMA1",	[257][1] = "SDMA1",
	[258][0] = "SDMA2",	[258][1] = "SDMA2",
	[259][0] = "SDMA3",	[259][1] = "SDMA3",
	[260][0] = "SDMA4",	[260][1] = "SDMA4",
	[261][0] = "SDMA5",	[261][1] = "SDMA5",
	[262][0] = "SDMA6",	[262][1] = "SDMA6",
	[263][0] = "SDMA7",	[263][1] = "SDMA7",
	[384][0] = "OSS",	[384][1] = "OSS",
};
293e60f8db5SAlex Xie 
/*
 * MMHUB fault client-ID -> name table for Aldebaran.
 * Both columns are identical for every populated client ID.
 * Layout as mmhub_client_ids_raven: [client_id][0/1], gaps are NULL.
 */
static const char *mmhub_client_ids_aldebaran[][2] = {
	[2][0] = "MP1",			[2][1] = "MP1",
	[3][0] = "MP0",			[3][1] = "MP0",
	[32+1][0]  = "DBGU_IO0",	[32+1][1]  = "DBGU_IO0",
	[32+2][0]  = "DBGU_IO2",	[32+2][1]  = "DBGU_IO2",
	[32+4][0]  = "MPIO",		[32+4][1]  = "MPIO",
	[96+11][0] = "JPEG0",		[96+11][1] = "JPEG0",
	[96+12][0] = "VCN0",		[96+12][1] = "VCN0",
	[96+13][0] = "VCNU0",		[96+13][1] = "VCNU0",
	[128+11][0] = "JPEG1",		[128+11][1] = "JPEG1",
	[128+12][0] = "VCN1",		[128+12][1] = "VCN1",
	[128+13][0] = "VCNU1",		[128+13][1] = "VCNU1",
	[160+1][0]  = "XDP",		[160+1][1]  = "XDP",
	[160+14][0] = "HDP",		[160+14][1] = "HDP",
	[256+0][0]  = "SDMA0",		[256+0][1]  = "SDMA0",
	[256+1][0]  = "SDMA1",		[256+1][1]  = "SDMA1",
	[256+2][0]  = "SDMA2",		[256+2][1]  = "SDMA2",
	[256+3][0]  = "SDMA3",		[256+3][1]  = "SDMA3",
	[256+4][0]  = "SDMA4",		[256+4][1]  = "SDMA4",
	[384+0][0]  = "OSS",		[384+0][1]  = "OSS",
};
334e844cd99SAlex Deucher 
33508e85215SSrinivasan Shanmugam static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = {
336946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
337946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
3385c583018SEvan Quan };
3395c583018SEvan Quan 
34008e85215SSrinivasan Shanmugam static const struct soc15_reg_golden golden_settings_athub_1_0_0[] = {
341946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
342946a4d5bSShaoyun Liu 	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
3435c583018SEvan Quan };
3445c583018SEvan Quan 
/*
 * UMC EccCtrl register addresses used by gmc_v9_0_ecc_interrupt_state().
 * Eight base addresses, each replicated at instance offsets
 * 0x0, 0x800, 0x1000 and 0x1800 (32 registers total).
 */
static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	0x000143c0, 0x000143c0 + 0x0800, 0x000143c0 + 0x1000, 0x000143c0 + 0x1800,
	0x000543c0, 0x000543c0 + 0x0800, 0x000543c0 + 0x1000, 0x000543c0 + 0x1800,
	0x000943c0, 0x000943c0 + 0x0800, 0x000943c0 + 0x1000, 0x000943c0 + 0x1800,
	0x000d43c0, 0x000d43c0 + 0x0800, 0x000d43c0 + 0x1000, 0x000d43c0 + 0x1800,
	0x001143c0, 0x001143c0 + 0x0800, 0x001143c0 + 0x1000, 0x001143c0 + 0x1800,
	0x001543c0, 0x001543c0 + 0x0800, 0x001543c0 + 0x1000, 0x001543c0 + 0x1800,
	0x001943c0, 0x001943c0 + 0x0800, 0x001943c0 + 0x1000, 0x001943c0 + 0x1800,
	0x001d43c0, 0x001d43c0 + 0x0800, 0x001d43c0 + 0x1000, 0x001d43c0 + 0x1800,
};
37902bab923SDavid Panariti 
/*
 * UMC EccCtrl mask register addresses, parallel to
 * ecc_umc_mcumc_ctrl_addrs (same bases, +0x20 register, same
 * instance offsets 0x0/0x800/0x1000/0x1800).
 */
static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	0x000143e0, 0x000143e0 + 0x0800, 0x000143e0 + 0x1000, 0x000143e0 + 0x1800,
	0x000543e0, 0x000543e0 + 0x0800, 0x000543e0 + 0x1000, 0x000543e0 + 0x1800,
	0x000943e0, 0x000943e0 + 0x0800, 0x000943e0 + 0x1000, 0x000943e0 + 0x1800,
	0x000d43e0, 0x000d43e0 + 0x0800, 0x000d43e0 + 0x1000, 0x000d43e0 + 0x1800,
	0x001143e0, 0x001143e0 + 0x0800, 0x001143e0 + 0x1000, 0x001143e0 + 0x1800,
	0x001543e0, 0x001543e0 + 0x0800, 0x001543e0 + 0x1000, 0x001543e0 + 0x1800,
	0x001943e0, 0x001943e0 + 0x0800, 0x001943e0 + 0x1000, 0x001943e0 + 0x1800,
	0x001d43e0, 0x001d43e0 + 0x0800, 0x001d43e0 + 0x1000, 0x001d43e0 + 0x1800,
};
41402bab923SDavid Panariti 
gmc_v9_0_ecc_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * src,unsigned int type,enum amdgpu_interrupt_state state)415791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
416791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
41708e85215SSrinivasan Shanmugam 		unsigned int type,
418791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
419791c4769Sxinhui pan {
420791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
421791c4769Sxinhui pan 
4221e2c6d55SJohn Clements 	/* Devices newer then VEGA10/12 shall have these programming
42308e85215SSrinivasan Shanmugam 	 * sequences performed by PSP BL
42408e85215SSrinivasan Shanmugam 	 */
4251e2c6d55SJohn Clements 	if (adev->asic_type >= CHIP_VEGA20)
4261e2c6d55SJohn Clements 		return 0;
4271e2c6d55SJohn Clements 
428791c4769Sxinhui pan 	bits = 0x7f;
429791c4769Sxinhui pan 
430791c4769Sxinhui pan 	switch (state) {
431791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
432791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
433791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
434791c4769Sxinhui pan 			tmp = RREG32(reg);
435791c4769Sxinhui pan 			tmp &= ~bits;
436791c4769Sxinhui pan 			WREG32(reg, tmp);
437791c4769Sxinhui pan 		}
438791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
439791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
440791c4769Sxinhui pan 			tmp = RREG32(reg);
441791c4769Sxinhui pan 			tmp &= ~bits;
442791c4769Sxinhui pan 			WREG32(reg, tmp);
443791c4769Sxinhui pan 		}
444791c4769Sxinhui pan 		break;
445791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
446791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
447791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
448791c4769Sxinhui pan 			tmp = RREG32(reg);
449791c4769Sxinhui pan 			tmp |= bits;
450791c4769Sxinhui pan 			WREG32(reg, tmp);
451791c4769Sxinhui pan 		}
452791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
453791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
454791c4769Sxinhui pan 			tmp = RREG32(reg);
455791c4769Sxinhui pan 			tmp |= bits;
456791c4769Sxinhui pan 			WREG32(reg, tmp);
457791c4769Sxinhui pan 		}
458791c4769Sxinhui pan 		break;
459791c4769Sxinhui pan 	default:
460791c4769Sxinhui pan 		break;
461791c4769Sxinhui pan 	}
462791c4769Sxinhui pan 
463791c4769Sxinhui pan 	return 0;
464791c4769Sxinhui pan }
465791c4769Sxinhui pan 
gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device * adev,struct amdgpu_irq_src * src,unsigned int type,enum amdgpu_interrupt_state state)466e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
467e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
46808e85215SSrinivasan Shanmugam 					unsigned int type,
469e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
470e60f8db5SAlex Xie {
471e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
472ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
473e60f8db5SAlex Xie 
47411250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47511250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47611250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47711250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47811250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47911250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
48011250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
48111250164SChristian König 
482e60f8db5SAlex Xie 	switch (state) {
483e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
484d9426c3dSLe Ma 		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
485ae6d1416STom St Denis 			hub = &adev->vmhub[j];
486e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
487e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
48892f153bbSVictor Skvortsov 
489b93df61dSAlex Deucher 				/* This works because this interrupt is only
490b93df61dSAlex Deucher 				 * enabled at init/resume and disabled in
491b93df61dSAlex Deucher 				 * fini/suspend, so the overall state doesn't
492b93df61dSAlex Deucher 				 * change over the course of suspend/resume.
493b93df61dSAlex Deucher 				 */
494f4caf584SHawking Zhang 				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
495b93df61dSAlex Deucher 					continue;
496b93df61dSAlex Deucher 
497f4caf584SHawking Zhang 				if (j >= AMDGPU_MMHUB0(0))
49892f153bbSVictor Skvortsov 					tmp = RREG32_SOC15_IP(MMHUB, reg);
499f4caf584SHawking Zhang 				else
500f4caf584SHawking Zhang 					tmp = RREG32_SOC15_IP(GC, reg);
50192f153bbSVictor Skvortsov 
502e60f8db5SAlex Xie 				tmp &= ~bits;
50392f153bbSVictor Skvortsov 
504f4caf584SHawking Zhang 				if (j >= AMDGPU_MMHUB0(0))
50592f153bbSVictor Skvortsov 					WREG32_SOC15_IP(MMHUB, reg, tmp);
506f4caf584SHawking Zhang 				else
507f4caf584SHawking Zhang 					WREG32_SOC15_IP(GC, reg, tmp);
508e60f8db5SAlex Xie 			}
509e60f8db5SAlex Xie 		}
510e60f8db5SAlex Xie 		break;
511e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
512d9426c3dSLe Ma 		for_each_set_bit(j, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
513ae6d1416STom St Denis 			hub = &adev->vmhub[j];
514e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
515e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
51692f153bbSVictor Skvortsov 
517b93df61dSAlex Deucher 				/* This works because this interrupt is only
518b93df61dSAlex Deucher 				 * enabled at init/resume and disabled in
519b93df61dSAlex Deucher 				 * fini/suspend, so the overall state doesn't
520b93df61dSAlex Deucher 				 * change over the course of suspend/resume.
521b93df61dSAlex Deucher 				 */
522f4caf584SHawking Zhang 				if (adev->in_s0ix && (j == AMDGPU_GFXHUB(0)))
523b93df61dSAlex Deucher 					continue;
524b93df61dSAlex Deucher 
525f4caf584SHawking Zhang 				if (j >= AMDGPU_MMHUB0(0))
52692f153bbSVictor Skvortsov 					tmp = RREG32_SOC15_IP(MMHUB, reg);
527f4caf584SHawking Zhang 				else
528f4caf584SHawking Zhang 					tmp = RREG32_SOC15_IP(GC, reg);
52992f153bbSVictor Skvortsov 
530e60f8db5SAlex Xie 				tmp |= bits;
53192f153bbSVictor Skvortsov 
532f4caf584SHawking Zhang 				if (j >= AMDGPU_MMHUB0(0))
53392f153bbSVictor Skvortsov 					WREG32_SOC15_IP(MMHUB, reg, tmp);
534f4caf584SHawking Zhang 				else
535f4caf584SHawking Zhang 					WREG32_SOC15_IP(GC, reg, tmp);
536e60f8db5SAlex Xie 			}
537e60f8db5SAlex Xie 		}
5389304ca4dSGustavo A. R. Silva 		break;
539e60f8db5SAlex Xie 	default:
540e60f8db5SAlex Xie 		break;
541e60f8db5SAlex Xie 	}
542e60f8db5SAlex Xie 
543e60f8db5SAlex Xie 	return 0;
544e60f8db5SAlex Xie }
545e60f8db5SAlex Xie 
gmc_v9_0_process_interrupt(struct amdgpu_device * adev,struct amdgpu_irq_src * source,struct amdgpu_iv_entry * entry)546e60f8db5SAlex Xie static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
547e60f8db5SAlex Xie 				      struct amdgpu_irq_src *source,
548e60f8db5SAlex Xie 				      struct amdgpu_iv_entry *entry)
549e60f8db5SAlex Xie {
550c468f9e2SChristian König 	bool retry_fault = !!(entry->src_data[1] & 0x80);
551ff891a2eSPhilip Yang 	bool write_fault = !!(entry->src_data[1] & 0x20);
55202f23f5fSAlex Deucher 	uint32_t status = 0, cid = 0, rw = 0;
553e3898719SChristian König 	struct amdgpu_task_info task_info;
554e3898719SChristian König 	struct amdgpu_vmhub *hub;
55502f23f5fSAlex Deucher 	const char *mmhub_cid;
556e3898719SChristian König 	const char *hub_name;
557e3898719SChristian König 	u64 addr;
558318e431bSMukul Joshi 	uint32_t cam_index = 0;
5596dabce86SHarshit Mogalapalli 	int ret, xcc_id = 0;
5606dabce86SHarshit Mogalapalli 	uint32_t node_id;
561eaae4beeSPhilip Yang 
56298b2e9caSLe Ma 	node_id = entry->node_id;
563e60f8db5SAlex Xie 
564e60f8db5SAlex Xie 	addr = (u64)entry->src_data[0] << 12;
565e60f8db5SAlex Xie 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
566e60f8db5SAlex Xie 
5675fb34bd9SAlex Sierra 	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
5685fb34bd9SAlex Sierra 		hub_name = "mmhub0";
56998b2e9caSLe Ma 		hub = &adev->vmhub[AMDGPU_MMHUB0(node_id / 4)];
5705fb34bd9SAlex Sierra 	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
5715fb34bd9SAlex Sierra 		hub_name = "mmhub1";
5725fb34bd9SAlex Sierra 		hub = &adev->vmhub[AMDGPU_MMHUB1(0)];
5735fb34bd9SAlex Sierra 	} else {
5745fb34bd9SAlex Sierra 		hub_name = "gfxhub0";
57598b2e9caSLe Ma 		if (adev->gfx.funcs->ih_node_to_logical_xcc) {
57698b2e9caSLe Ma 			xcc_id = adev->gfx.funcs->ih_node_to_logical_xcc(adev,
57798b2e9caSLe Ma 				node_id);
57898b2e9caSLe Ma 			if (xcc_id < 0)
57998b2e9caSLe Ma 				xcc_id = 0;
58098b2e9caSLe Ma 		}
58198b2e9caSLe Ma 		hub = &adev->vmhub[xcc_id];
5825fb34bd9SAlex Sierra 	}
5835fb34bd9SAlex Sierra 
5840291150dSChristian König 	if (retry_fault) {
585318e431bSMukul Joshi 		if (adev->irq.retry_cam_enabled) {
586318e431bSMukul Joshi 			/* Delegate it to a different ring if the hardware hasn't
587318e431bSMukul Joshi 			 * already done it.
588318e431bSMukul Joshi 			 */
589318e431bSMukul Joshi 			if (entry->ih == &adev->irq.ih) {
590318e431bSMukul Joshi 				amdgpu_irq_delegate(adev, entry, 8);
591318e431bSMukul Joshi 				return 1;
592318e431bSMukul Joshi 			}
59322666cc1SChristian König 
594318e431bSMukul Joshi 			cam_index = entry->src_data[2] & 0x3ff;
595318e431bSMukul Joshi 
596f5fe7edfSMukul Joshi 			ret = amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
5975fb34bd9SAlex Sierra 						     addr, write_fault);
598318e431bSMukul Joshi 			WDOORBELL32(adev->irq.retry_cam_doorbell_index, cam_index);
599318e431bSMukul Joshi 			if (ret)
600318e431bSMukul Joshi 				return 1;
601318e431bSMukul Joshi 		} else {
6020291150dSChristian König 			/* Process it only if it's the first fault for this address */
6030291150dSChristian König 			if (entry->ih != &adev->irq.ih_soft &&
6043c2d6ea2SPhilip Yang 			    amdgpu_gmc_filter_faults(adev, entry->ih, addr, entry->pasid,
6050291150dSChristian König 					     entry->timestamp))
6060291150dSChristian König 				return 1;
6070291150dSChristian König 
6080291150dSChristian König 			/* Delegate it to a different ring if the hardware hasn't
6090291150dSChristian König 			 * already done it.
6100291150dSChristian König 			 */
61158df0d71SSebastian Andrzej Siewior 			if (entry->ih == &adev->irq.ih) {
6120291150dSChristian König 				amdgpu_irq_delegate(adev, entry, 8);
6130291150dSChristian König 				return 1;
6140291150dSChristian König 			}
6150291150dSChristian König 
6160291150dSChristian König 			/* Try to handle the recoverable page faults by filling page
6170291150dSChristian König 			 * tables
6180291150dSChristian König 			 */
619f5fe7edfSMukul Joshi 			if (amdgpu_vm_handle_fault(adev, entry->pasid, entry->vmid, node_id,
6205fb34bd9SAlex Sierra 						   addr, write_fault))
6210291150dSChristian König 				return 1;
6220291150dSChristian König 		}
623318e431bSMukul Joshi 	}
624ec671737SChristian König 
625e3898719SChristian König 	if (!printk_ratelimit())
626e3898719SChristian König 		return 0;
62753499173SXiaojie Yuan 
628e60f8db5SAlex Xie 
62905794effSShirish S 	memset(&task_info, 0, sizeof(struct amdgpu_task_info));
630efaa9646SAndrey Grodzovsky 	amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
631efaa9646SAndrey Grodzovsky 
6324d6cbde3SFelix Kuehling 	dev_err(adev->dev,
63308e85215SSrinivasan Shanmugam 		"[%s] %s page fault (src_id:%u ring:%u vmid:%u pasid:%u, for process %s pid %d thread %s pid %d)\n",
63451c60898SLe Ma 		hub_name, retry_fault ? "retry" : "no-retry",
635c4f46f22SChristian König 		entry->src_id, entry->ring_id, entry->vmid,
636efaa9646SAndrey Grodzovsky 		entry->pasid, task_info.process_name, task_info.tgid,
637efaa9646SAndrey Grodzovsky 		task_info.task_name, task_info.pid);
638be14729aSYong Zhao 	dev_err(adev->dev, "  in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
639be14729aSYong Zhao 		addr, entry->client_id,
640be14729aSYong Zhao 		soc15_ih_clientid_name[entry->client_id]);
641e3898719SChristian König 
642eaae4beeSPhilip Yang 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
643eaae4beeSPhilip Yang 		dev_err(adev->dev, "  cookie node_id %d fault from die %s%d%s\n",
644eaae4beeSPhilip Yang 			node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4,
645eaae4beeSPhilip Yang 			node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? ".XCD1" : "");
646eaae4beeSPhilip Yang 
647e3898719SChristian König 	if (amdgpu_sriov_vf(adev))
648e3898719SChristian König 		return 0;
649e3898719SChristian König 
650e3898719SChristian König 	/*
651e3898719SChristian König 	 * Issue a dummy read to wait for the status register to
652e3898719SChristian König 	 * be updated to avoid reading an incorrect value due to
653e3898719SChristian König 	 * the new fast GRBM interface.
654e3898719SChristian König 	 */
655f4caf584SHawking Zhang 	if ((entry->vmid_src == AMDGPU_GFXHUB(0)) &&
656630e959fSAlex Deucher 	    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
657e3898719SChristian König 		RREG32(hub->vm_l2_pro_fault_status);
658e3898719SChristian König 
659e3898719SChristian König 	status = RREG32(hub->vm_l2_pro_fault_status);
660e3898719SChristian König 	cid = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, CID);
661e3898719SChristian König 	rw = REG_GET_FIELD(status, VM_L2_PROTECTION_FAULT_STATUS, RW);
662e3898719SChristian König 	WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
663e3898719SChristian König 
6644d6cbde3SFelix Kuehling 	dev_err(adev->dev,
6654d6cbde3SFelix Kuehling 		"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
6664d6cbde3SFelix Kuehling 		status);
66721e1217bSMukul Joshi 	if (entry->vmid_src == AMDGPU_GFXHUB(0)) {
668be99ecbfSAlex Deucher 		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
669e3898719SChristian König 			cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" :
670e3898719SChristian König 			gfxhub_client_ids[cid],
671be99ecbfSAlex Deucher 			cid);
67202f23f5fSAlex Deucher 	} else {
673630e959fSAlex Deucher 		switch (adev->ip_versions[MMHUB_HWIP][0]) {
674630e959fSAlex Deucher 		case IP_VERSION(9, 0, 0):
67502f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_vega10[cid][rw];
67602f23f5fSAlex Deucher 			break;
677630e959fSAlex Deucher 		case IP_VERSION(9, 3, 0):
67802f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_vega12[cid][rw];
67902f23f5fSAlex Deucher 			break;
680630e959fSAlex Deucher 		case IP_VERSION(9, 4, 0):
68102f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_vega20[cid][rw];
68202f23f5fSAlex Deucher 			break;
683630e959fSAlex Deucher 		case IP_VERSION(9, 4, 1):
68402f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
68502f23f5fSAlex Deucher 			break;
686630e959fSAlex Deucher 		case IP_VERSION(9, 1, 0):
687630e959fSAlex Deucher 		case IP_VERSION(9, 2, 0):
68802f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_raven[cid][rw];
68902f23f5fSAlex Deucher 			break;
690630e959fSAlex Deucher 		case IP_VERSION(1, 5, 0):
691630e959fSAlex Deucher 		case IP_VERSION(2, 4, 0):
69202f23f5fSAlex Deucher 			mmhub_cid = mmhub_client_ids_renoir[cid][rw];
69302f23f5fSAlex Deucher 			break;
694ab1a157eSHawking Zhang 		case IP_VERSION(1, 8, 0):
695630e959fSAlex Deucher 		case IP_VERSION(9, 4, 2):
696e844cd99SAlex Deucher 			mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
697e844cd99SAlex Deucher 			break;
69802f23f5fSAlex Deucher 		default:
69902f23f5fSAlex Deucher 			mmhub_cid = NULL;
70002f23f5fSAlex Deucher 			break;
70102f23f5fSAlex Deucher 		}
70202f23f5fSAlex Deucher 		dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
70302f23f5fSAlex Deucher 			mmhub_cid ? mmhub_cid : "unknown", cid);
70402f23f5fSAlex Deucher 	}
7055ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
7065ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
7075ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
7085ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
7095ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
7105ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
7115ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
7125ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
7135ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
7145ddd4a9aSYong Zhao 	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
7155ddd4a9aSYong Zhao 		REG_GET_FIELD(status,
7165ddd4a9aSYong Zhao 		VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
71702f23f5fSAlex Deucher 	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
718e60f8db5SAlex Xie 	return 0;
719e60f8db5SAlex Xie }
720e60f8db5SAlex Xie 
/* VM fault interrupt source: enable/disable control plus fault processing */
static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
	.set = gmc_v9_0_vm_fault_interrupt_state,
	.process = gmc_v9_0_process_interrupt,
};
725e60f8db5SAlex Xie 
726791c4769Sxinhui pan 
/* ECC interrupt source: enable/disable control plus UMC ECC IRQ processing */
static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
	.set = gmc_v9_0_ecc_interrupt_state,
	.process = amdgpu_umc_process_ecc_irq,
};
731791c4769Sxinhui pan 
gmc_v9_0_set_irq_funcs(struct amdgpu_device * adev)732e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
733e60f8db5SAlex Xie {
734770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
735770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
736791c4769Sxinhui pan 
73768d705ddSHawking Zhang 	if (!amdgpu_sriov_vf(adev) &&
73868d705ddSHawking Zhang 	    !adev->gmc.xgmi.connected_to_cpu) {
739791c4769Sxinhui pan 		adev->gmc.ecc_irq.num_types = 1;
740791c4769Sxinhui pan 		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
741e60f8db5SAlex Xie 	}
7422ee9403eSZhigang Luo }
743e60f8db5SAlex Xie 
/*
 * Build the VM_INVALIDATE_ENG0_REQ register value that invalidates the
 * translations cached for @vmid using the given @flush_type.
 */
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
					uint32_t flush_type)
{
	u32 req = 0;

	/* Restrict the invalidation to the requested VMID only. */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	/* flush_type 2 is the heavy-weight flush, see gmc_v9_0_flush_gpu_tlb */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	/* Drop cached translations at every page-table level. */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	/* Leave the protection fault status registers untouched. */
	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}
76203f89febSChristian König 
76390f6452cSchangzhu /**
76490f6452cSchangzhu  * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
76590f6452cSchangzhu  *
76690f6452cSchangzhu  * @adev: amdgpu_device pointer
76790f6452cSchangzhu  * @vmhub: vmhub type
76890f6452cSchangzhu  *
76990f6452cSchangzhu  */
gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device * adev,uint32_t vmhub)77090f6452cSchangzhu static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
77190f6452cSchangzhu 				       uint32_t vmhub)
77290f6452cSchangzhu {
773ab1a157eSHawking Zhang 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
774ab1a157eSHawking Zhang 	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
775d477c5aaSHawking Zhang 		return false;
776d477c5aaSHawking Zhang 
777f4caf584SHawking Zhang 	return ((vmhub == AMDGPU_MMHUB0(0) ||
778f4caf584SHawking Zhang 		 vmhub == AMDGPU_MMHUB1(0)) &&
77990f6452cSchangzhu 		(!amdgpu_sriov_vf(adev)) &&
78054f78a76SAlex Deucher 		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
78154f78a76SAlex Deucher 		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
78290f6452cSchangzhu }
78390f6452cSchangzhu 
gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device * adev,uint8_t vmid,uint16_t * p_pasid)784ea930000SAlex Sierra static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
785ea930000SAlex Sierra 					uint8_t vmid, uint16_t *p_pasid)
786ea930000SAlex Sierra {
787ea930000SAlex Sierra 	uint32_t value;
788ea930000SAlex Sierra 
789ea930000SAlex Sierra 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
790ea930000SAlex Sierra 		     + vmid);
791ea930000SAlex Sierra 	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
792ea930000SAlex Sierra 
793ea930000SAlex Sierra 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
794ea930000SAlex Sierra }
795ea930000SAlex Sierra 
796e60f8db5SAlex Xie /*
797e60f8db5SAlex Xie  * GART
798e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
799e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
800e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
801e60f8db5SAlex Xie  */
802e60f8db5SAlex Xie 
/**
 * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table using certain type.
 */
static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
					uint32_t vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
	/* invalidation engine index used for all MMIO flushes below */
	const unsigned int eng = 17;
	u32 j, inv_req, inv_req2, tmp;
	struct amdgpu_vmhub *hub;

	BUG_ON(vmhub >= AMDGPU_MAX_VMHUBS);

	hub = &adev->vmhub[vmhub];
	if (adev->gmc.xgmi.num_physical_nodes &&
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else if (flush_type == 2 &&
		   adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
		   adev->rev_id == 0) {
		/* GC 9.4.3 rev 0: issue a light-weight (type 0) invalidation
		 * first, then the requested heavy-weight one.
		 */
		inv_req = gmc_v9_0_get_invalidate_req(vmid, 0);
		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
	} else {
		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
		inv_req2 = 0;
	}

	/* This is necessary for a HW workaround under SRIOV as well
	 * as GFXOFF under bare metal
	 */
	if (adev->gfx.kiq[0].ring.sched.ready &&
	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
	    down_read_trylock(&adev->reset_domain->sem)) {
		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;

		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
						   1 << vmid);
		up_read(&adev->reset_domain->sem);
		return;
	}

	/* Direct MMIO path: serialize all flushes through the same engine. */
	spin_lock(&adev->gmc.invalidate_lock);

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		for (j = 0; j < adev->usec_timeout; j++) {
			/* a read return value of 1 means semaphore acquire */
			if (vmhub >= AMDGPU_MMHUB0(0))
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (j >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	/* First pass sends inv_req; a second pass sends inv_req2 if set. */
	do {
		if (vmhub >= AMDGPU_MMHUB0(0))
			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
		else
			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);

		/*
		 * Issue a dummy read to wait for the ACK register to
		 * be cleared to avoid a false ACK due to the new fast
		 * GRBM interface.
		 */
		if ((vmhub == AMDGPU_GFXHUB(0)) &&
		    (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
				      hub->eng_distance * eng);

		/* Poll the per-VMID ACK bit for this engine. */
		for (j = 0; j < adev->usec_timeout; j++) {
			if (vmhub >= AMDGPU_MMHUB0(0))
				tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
			else
				tmp = RREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_ack + hub->eng_distance * eng);
			if (tmp & (1 << vmid))
				break;
			udelay(1);
		}

		inv_req = inv_req2;
		inv_req2 = 0;
	} while (inv_req);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore) {
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		if (vmhub >= AMDGPU_MMHUB0(0))
			WREG32_SOC15_IP_NO_KIQ(MMHUB, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
		else
			WREG32_SOC15_IP_NO_KIQ(GC, hub->vm_inv_eng0_sem + hub->eng_distance * eng, 0);
	}

	spin_unlock(&adev->gmc.invalidate_lock);

	/* j still holds the result of the last ACK poll loop. */
	if (j < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}
934e60f8db5SAlex Xie 
/**
 * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flush
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.  Uses the KIQ when its ring is
 * ready; otherwise falls back to resolving the PASID to VMID(s) and
 * flushing through MMIO.
 */
static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					uint16_t pasid, uint32_t flush_type,
					bool all_hub, uint32_t inst)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	/* presumably SRIOV needs the longer SRIOV_USEC_TIMEOUT because of
	 * host world switches — confirm against SRIOV docs
	 */
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];

	if (amdgpu_in_reset(adev))
		return -EIO;

	if (ring->sched.ready && down_read_trylock(&adev->reset_domain->sem)) {
		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
		 * heavy-weight TLB flush (type 2), which flushes
		 * both. Due to a race condition with concurrent
		 * memory accesses using the same TLB cache line, we
		 * still need a second TLB flush after this.
		 */
		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
				       adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
		/* one invalidate packet + 8 dwords fence */
		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;

		if (vega20_xgmi_wa)
			ndw += kiq->pmf->invalidate_tlbs_size;

		/* NOTE(review): the GC 9.4.3 rev 0 path below emits an extra
		 * invalidate packet that ndw does not account for — verify
		 * the ring allocation is still large enough in that case.
		 */
		spin_lock(&adev->gfx.kiq[inst].ring_lock);
		amdgpu_ring_alloc(ring, ndw);
		if (vega20_xgmi_wa)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						      pasid, 2, all_hub);

		/* GC 9.4.3 rev 0: precede a heavy-weight flush with a
		 * light-weight (type 0) one.
		 */
		if (flush_type == 2 &&
		    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3) &&
		    adev->rev_id == 0)
			kiq->pmf->kiq_invalidate_tlbs(ring,
						pasid, 0, all_hub);

		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			/* Roll back the packets written since ring_alloc. */
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
			up_read(&adev->reset_domain->sem);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_domain->sem);
			return -ETIME;
		}
		up_read(&adev->reset_domain->sem);
		return 0;
	}

	/* KIQ unavailable: find the VMID(s) mapped to this PASID and flush
	 * them directly.
	 */
	for (vmid = 1; vmid < 16; vmid++) {

		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
					gmc_v9_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v9_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB(0), flush_type);
			}
			break;
		}
	}

	return 0;

}
1032ea930000SAlex Sierra 
/* Emit a TLB flush for @vmid into @ring: program the page-directory base,
 * then issue a light-weight (type 0) invalidation request and wait for the
 * per-VMID ACK in-band.  Returns @pd_addr for the caller.
 */
static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->vm_hub);
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_vmhub *hub = &adev->vmhub[ring->vm_hub];
	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
	unsigned int eng = ring->vm_inv_eng;

	/*
	 * It may lose gpuvm invalidate acknowledge state across power-gating
	 * off cycle, add semaphore acquire before invalidation and semaphore
	 * release after invalidation to avoid entering power gated state
	 * to work around the issue.
	 */

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem +
					  hub->eng_distance * eng, 0x1, 0x1);

	/* Program the page-directory base address for this VMID. */
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
			      (hub->ctx_addr_distance * vmid),
			      lower_32_bits(pd_addr));

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
			      (hub->ctx_addr_distance * vmid),
			      upper_32_bits(pd_addr));

	/* Kick off the invalidation and wait for the per-VMID ACK bit. */
	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
					    hub->eng_distance * eng,
					    hub->vm_inv_eng0_ack +
					    hub->eng_distance * eng,
					    req, 1 << vmid);

	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation,
		 * write with 0 means semaphore release
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
				      hub->eng_distance * eng, 0);

	return pd_addr;
}
10819096d6e5SChristian König 
gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring * ring,unsigned int vmid,unsigned int pasid)108208e85215SSrinivasan Shanmugam static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
108308e85215SSrinivasan Shanmugam 					unsigned int pasid)
1084c633c00bSChristian König {
1085c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
1086c633c00bSChristian König 	uint32_t reg;
1087c633c00bSChristian König 
1088f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
1089f4caf584SHawking Zhang 	if (ring->vm_hub == AMDGPU_MMHUB1(0))
1090f2d66571SLe Ma 		return;
1091f2d66571SLe Ma 
1092f4caf584SHawking Zhang 	if (ring->vm_hub == AMDGPU_GFXHUB(0))
1093c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
1094c633c00bSChristian König 	else
1095c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
1096c633c00bSChristian König 
1097c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
1098c633c00bSChristian König }
1099c633c00bSChristian König 
1100e60f8db5SAlex Xie /*
1101e60f8db5SAlex Xie  * PTE format on VEGA 10:
1102e60f8db5SAlex Xie  * 63:59 reserved
1103e60f8db5SAlex Xie  * 58:57 mtype
1104e60f8db5SAlex Xie  * 56 F
1105e60f8db5SAlex Xie  * 55 L
1106e60f8db5SAlex Xie  * 54 P
1107e60f8db5SAlex Xie  * 53 SW
1108e60f8db5SAlex Xie  * 52 T
1109e60f8db5SAlex Xie  * 50:48 reserved
1110e60f8db5SAlex Xie  * 47:12 4k physical page base address
1111e60f8db5SAlex Xie  * 11:7 fragment
1112e60f8db5SAlex Xie  * 6 write
1113e60f8db5SAlex Xie  * 5 read
1114e60f8db5SAlex Xie  * 4 exe
1115e60f8db5SAlex Xie  * 3 Z
1116e60f8db5SAlex Xie  * 2 snooped
1117e60f8db5SAlex Xie  * 1 system
1118e60f8db5SAlex Xie  * 0 valid
1119e60f8db5SAlex Xie  *
1120e60f8db5SAlex Xie  * PDE format on VEGA 10:
1121e60f8db5SAlex Xie  * 63:59 block fragment size
1122e60f8db5SAlex Xie  * 58:55 reserved
1123e60f8db5SAlex Xie  * 54 P
1124e60f8db5SAlex Xie  * 53:48 reserved
1125e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
1126e60f8db5SAlex Xie  * 5:3 reserved
1127e60f8db5SAlex Xie  * 2 C
1128e60f8db5SAlex Xie  * 1 system
1129e60f8db5SAlex Xie  * 0 valid
1130e60f8db5SAlex Xie  */
1131e60f8db5SAlex Xie 
gmc_v9_0_map_mtype(struct amdgpu_device * adev,uint32_t flags)113271776b6dSChristian König static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1133e60f8db5SAlex Xie 
1134e60f8db5SAlex Xie {
113571776b6dSChristian König 	switch (flags) {
1136e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
113771776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1138e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
113971776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1140e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
114171776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
1142093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
114371776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
1144e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
114571776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
1146e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
114771776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
1148e60f8db5SAlex Xie 	default:
114971776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1150e60f8db5SAlex Xie 	}
1151e60f8db5SAlex Xie }
1152e60f8db5SAlex Xie 
/* Adjust a PDE's address and flags for the hardware before it is written.
 * @level identifies the page-directory level the entry lives in; @addr and
 * @flags are updated in place.
 */
static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	/* Entries pointing at VRAM (neither PTE nor system memory) carry an
	 * MC address that must be converted to a physical address first.
	 */
	if (!(*flags & (AMDGPU_PDE_PTE | AMDGPU_PTE_SYSTEM)))
		*addr = amdgpu_gmc_vram_mc2pa(adev, *addr);
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	switch (level) {
	case AMDGPU_VM_PDB1:
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);
		break;
	case AMDGPU_VM_PDB0:
		if (!(*flags & AMDGPU_PDE_PTE)) {
			*flags |= AMDGPU_PTE_TF;
		} else {
			*flags &= ~AMDGPU_PDE_PTE;
			/* For invalid entries, set bit PAGE_SHIFT of the
			 * address instead.
			 */
			if (!(*flags & AMDGPU_PTE_VALID))
				*addr |= 1 << PAGE_SHIFT;
		}
		break;
	default:
		break;
	}
}
1178f75e237cSChristian König 
/* Derive the MTYPE (cache attribute) and SNOOPED PTE bits for @bo as seen
 * through @mapping and merge them into @flags.
 *
 * The result depends on the GC IP version, on whether the BO is placed in
 * VRAM or system memory, on whether it is backed by this GPU
 * (bo_adev == adev) or a peer, and on the per-BO
 * AMDGPU_GEM_CREATE_COHERENT / AMDGPU_GEM_CREATE_UNCACHED flags.
 */
static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
					 struct amdgpu_bo *bo,
					 struct amdgpu_bo_va_mapping *mapping,
					 uint64_t *flags)
{
	/* device that provides the BO's backing store; may differ from adev */
	struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
	bool is_vram = bo->tbo.resource->mem_type == TTM_PL_VRAM;
	bool coherent = bo->flags & AMDGPU_GEM_CREATE_COHERENT;
	bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED;
	struct amdgpu_vm *vm = mapping->bo_va->base.vm;
	unsigned int mtype_local, mtype;
	bool snoop = false;
	bool is_local;

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		if (is_vram) {
			if (bo_adev == adev) {
				/* local VRAM: RW unless the BO requests
				 * coherent (CC) or uncached (UC) access
				 */
				if (uncached)
					mtype = MTYPE_UC;
				else if (coherent)
					mtype = MTYPE_CC;
				else
					mtype = MTYPE_RW;
				/* FIXME: is this still needed? Or does
				 * amdgpu_ttm_tt_pde_flags already handle this?
				 */
				if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
				     adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) &&
				    adev->gmc.xgmi.connected_to_cpu)
					snoop = true;
			} else {
				/* VRAM of a peer GPU is never cached locally */
				if (uncached || coherent)
					mtype = MTYPE_UC;
				else
					mtype = MTYPE_NC;
				if (mapping->bo_va->is_xgmi)
					snoop = true;
			}
		} else {
			/* GTT / system memory */
			if (uncached || coherent)
				mtype = MTYPE_UC;
			else
				mtype = MTYPE_NC;
			/* FIXME: is this still needed? Or does
			 * amdgpu_ttm_tt_pde_flags already handle this?
			 */
			snoop = true;
		}
		break;
	case IP_VERSION(9, 4, 3):
		/* Only local VRAM BOs or system memory on non-NUMA APUs
		 * can be assumed to be local in their entirety. Choose
		 * MTYPE_NC as safe fallback for all system memory BOs on
		 * NUMA systems. Their MTYPE can be overridden per-page in
		 * gmc_v9_0_override_vm_pte_flags.
		 */
		mtype_local = MTYPE_RW;
		if (amdgpu_mtype_local == 1) {
			DRM_INFO_ONCE("Using MTYPE_NC for local memory\n");
			mtype_local = MTYPE_NC;
		} else if (amdgpu_mtype_local == 2) {
			DRM_INFO_ONCE("Using MTYPE_CC for local memory\n");
			mtype_local = MTYPE_CC;
		} else {
			DRM_INFO_ONCE("Using MTYPE_RW for local memory\n");
		}
		/* local = system memory on a single-node APU, or VRAM of
		 * this GPU in the same memory partition as the VM
		 */
		is_local = (!is_vram && (adev->flags & AMD_IS_APU) &&
			    num_possible_nodes() <= 1) ||
			   (is_vram && adev == bo_adev &&
			    KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
		snoop = true;
		if (uncached) {
			mtype = MTYPE_UC;
		} else if (adev->flags & AMD_IS_APU) {
			mtype = is_local ? mtype_local : MTYPE_NC;
		} else {
			/* dGPU */
			if (is_local)
				mtype = mtype_local;
			else if (is_vram)
				mtype = MTYPE_NC;
			else
				mtype = MTYPE_UC;
		}

		break;
	default:
		if (uncached || coherent)
			mtype = MTYPE_UC;
		else
			mtype = MTYPE_NC;

		/* FIXME: is this still needed? Or does
		 * amdgpu_ttm_tt_pde_flags already handle this?
		 */
		if (!is_vram)
			snoop = true;
	}

	/* NOTE(review): MTYPE_NC is left as-is, presumably because it is the
	 * default encoding already present in *flags — confirm against the
	 * caller (gmc_v9_0_get_vm_pte).
	 */
	if (mtype != MTYPE_NC)
		*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
			 AMDGPU_PTE_MTYPE_VG10(mtype);
	*flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
}
1285d1a372afSFelix Kuehling 
/* Compute the final PTE flags for @mapping.
 *
 * Starts from the flags already in @flags, overrides the EXECUTABLE bit
 * and the MTYPE field from the per-mapping flags, turns PRT mappings into
 * invalid PTEs, and finally lets gmc_v9_0_get_coherence_flags() refine the
 * MTYPE/SNOOPED bits based on where the BO is actually placed.
 */
static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	struct amdgpu_bo *bo = mapping->bo_va->base.bo;

	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;

	/* PRT mappings must never be marked valid */
	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags &= ~AMDGPU_PTE_VALID;
	}

	/* Only BOs with a backing store have coherence flags to derive.
	 * Reuse the local 'bo' instead of re-deriving it from the mapping.
	 */
	if (bo && bo->tbo.resource)
		gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags);
}
1307cbfae36cSChristian König 
/* Per-page MTYPE refinement for GC 9.4.3 APUs: upgrade a PTE that maps
 * system memory at physical address @addr from the safe MTYPE_NC default
 * to the configured local MTYPE when the page lives on the same NUMA node
 * as the VM's memory partition.  This is the override announced in
 * gmc_v9_0_get_coherence_flags().
 */
static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
					   struct amdgpu_vm *vm,
					   uint64_t addr, uint64_t *flags)
{
	int local_node, nid;

	/* Only GFX 9.4.3 APUs associate GPUs with NUMA nodes. Local system
	 * memory can use more efficient MTYPEs.
	 */
	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
		return;

	/* Only direct-mapped memory allows us to determine the NUMA node from
	 * the DMA address.
	 */
	if (!adev->ram_is_direct_mapped) {
		dev_dbg(adev->dev, "RAM is not direct mapped\n");
		return;
	}

	/* Only override mappings with MTYPE_NC, which is the safe default for
	 * cacheable memory.
	 */
	if ((*flags & AMDGPU_PTE_MTYPE_VG10_MASK) !=
	    AMDGPU_PTE_MTYPE_VG10(MTYPE_NC)) {
		dev_dbg(adev->dev, "MTYPE is not NC\n");
		return;
	}

	/* FIXME: Only supported on native mode for now. For carve-out, the
	 * NUMA affinity of the GPU/VM needs to come from the PCI info because
	 * memory partitions are not associated with different NUMA nodes.
	 */
	if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
		local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
	} else {
		dev_dbg(adev->dev, "Only native mode APU is supported.\n");
		return;
	}

	/* Only handle real RAM. Mappings of PCIe resources don't have struct
	 * page or NUMA nodes.
	 */
	if (!page_is_ram(addr >> PAGE_SHIFT)) {
		dev_dbg(adev->dev, "Page is not RAM.\n");
		return;
	}
	nid = pfn_to_nid(addr >> PAGE_SHIFT);
	dev_dbg(adev->dev, "vm->mem_id=%d, local_node=%d, nid=%d\n",
		vm->mem_id, local_node, nid);
	if (nid == local_node) {
		uint64_t old_flags = *flags;
		/* same module-parameter mapping as in
		 * gmc_v9_0_get_coherence_flags(): 1 = NC, 2 = CC, else RW
		 */
		unsigned int mtype_local = MTYPE_RW;

		if (amdgpu_mtype_local == 1)
			mtype_local = MTYPE_NC;
		else if (amdgpu_mtype_local == 2)
			mtype_local = MTYPE_CC;

		*flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
			 AMDGPU_PTE_MTYPE_VG10(mtype_local);
		dev_dbg(adev->dev, "flags updated from %llx to %llx\n",
			old_flags, *flags);
	}
}
1373352b919cSFelix Kuehling 
gmc_v9_0_get_vbios_fb_size(struct amdgpu_device * adev)137408e85215SSrinivasan Shanmugam static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
13757b885f0eSAlex Deucher {
13767b885f0eSAlex Deucher 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
137708e85215SSrinivasan Shanmugam 	unsigned int size;
13787b885f0eSAlex Deucher 
1379dc5d4affSHarry Wentland 	/* TODO move to DC so GMC doesn't need to hard-code DCN registers */
1380dc5d4affSHarry Wentland 
13817b885f0eSAlex Deucher 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
13827b885f0eSAlex Deucher 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
13837b885f0eSAlex Deucher 	} else {
13847b885f0eSAlex Deucher 		u32 viewport;
13857b885f0eSAlex Deucher 
1386630e959fSAlex Deucher 		switch (adev->ip_versions[DCE_HWIP][0]) {
1387630e959fSAlex Deucher 		case IP_VERSION(1, 0, 0):
1388630e959fSAlex Deucher 		case IP_VERSION(1, 0, 1):
13897b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
13907b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport,
13917b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
13927b885f0eSAlex Deucher 				REG_GET_FIELD(viewport,
13937b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
13947b885f0eSAlex Deucher 				4);
13957b885f0eSAlex Deucher 			break;
1396dc5d4affSHarry Wentland 		case IP_VERSION(2, 1, 0):
1397dc5d4affSHarry Wentland 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
1398dc5d4affSHarry Wentland 			size = (REG_GET_FIELD(viewport,
1399dc5d4affSHarry Wentland 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
1400dc5d4affSHarry Wentland 				REG_GET_FIELD(viewport,
1401dc5d4affSHarry Wentland 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
1402dc5d4affSHarry Wentland 				4);
1403dc5d4affSHarry Wentland 			break;
14047b885f0eSAlex Deucher 		default:
14057b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
14067b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
14077b885f0eSAlex Deucher 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
14087b885f0eSAlex Deucher 				4);
14097b885f0eSAlex Deucher 			break;
14107b885f0eSAlex Deucher 		}
14117b885f0eSAlex Deucher 	}
14127b885f0eSAlex Deucher 
14137b885f0eSAlex Deucher 	return size;
14147b885f0eSAlex Deucher }
14157b885f0eSAlex Deucher 
1416b6f90baaSLijo Lazar static enum amdgpu_memory_partition
gmc_v9_0_get_memory_partition(struct amdgpu_device * adev,u32 * supp_modes)14170f2e1d62SLijo Lazar gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
1418b6f90baaSLijo Lazar {
1419b6f90baaSLijo Lazar 	enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
1420b6f90baaSLijo Lazar 
1421b6f90baaSLijo Lazar 	if (adev->nbio.funcs->get_memory_partition_mode)
14220f2e1d62SLijo Lazar 		mode = adev->nbio.funcs->get_memory_partition_mode(adev,
14230f2e1d62SLijo Lazar 								   supp_modes);
1424b6f90baaSLijo Lazar 
1425b6f90baaSLijo Lazar 	return mode;
1426b6f90baaSLijo Lazar }
1427b6f90baaSLijo Lazar 
14280f2e1d62SLijo Lazar static enum amdgpu_memory_partition
gmc_v9_0_query_memory_partition(struct amdgpu_device * adev)14290f2e1d62SLijo Lazar gmc_v9_0_query_memory_partition(struct amdgpu_device *adev)
14300f2e1d62SLijo Lazar {
143146f7b4deSGavin Wan 	if (amdgpu_sriov_vf(adev))
143246f7b4deSGavin Wan 		return AMDGPU_NPS1_PARTITION_MODE;
143346f7b4deSGavin Wan 
14340f2e1d62SLijo Lazar 	return gmc_v9_0_get_memory_partition(adev, NULL);
14350f2e1d62SLijo Lazar }
14360f2e1d62SLijo Lazar 
1437132f34e4SChristian König static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1438132f34e4SChristian König 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1439ea930000SAlex Sierra 	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
14409096d6e5SChristian König 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1441c633c00bSChristian König 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
144271776b6dSChristian König 	.map_mtype = gmc_v9_0_map_mtype,
1443cbfae36cSChristian König 	.get_vm_pde = gmc_v9_0_get_vm_pde,
14447b885f0eSAlex Deucher 	.get_vm_pte = gmc_v9_0_get_vm_pte,
1445352b919cSFelix Kuehling 	.override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags,
14467b885f0eSAlex Deucher 	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1447b6f90baaSLijo Lazar 	.query_mem_partition_mode = &gmc_v9_0_query_memory_partition,
1448e60f8db5SAlex Xie };
1449e60f8db5SAlex Xie 
/* Install the GMC v9 callback table on the device. */
static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
}
1454e60f8db5SAlex Xie 
/* Select the UMC (unified memory controller) implementation and its RAS
 * parameters (channel/instance counts, register offsets, channel index
 * tables) based on the UMC IP version.
 */
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[UMC_HWIP][0]) {
	case IP_VERSION(6, 0, 0):
		adev->umc.funcs = &umc_v6_0_funcs;
		break;
	case IP_VERSION(6, 1, 1):
		/* UMC 6.1 with the VG20 per-channel register offset */
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 1, 2):
		/* UMC 6.1 with the ARCT per-channel register offset */
		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
		adev->umc.retire_unit = 1;
		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
		adev->umc.ras = &umc_v6_1_ras;
		break;
	case IP_VERSION(6, 7, 0):
		adev->umc.max_ras_err_cnt_per_query =
			UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL;
		adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
		adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
		adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
		adev->umc.retire_unit = (UMC_V6_7_NA_MAP_PA_NUM * 2);
		/* the driver only owns UMC RAS when not connected to the
		 * CPU via XGMI
		 */
		if (!adev->gmc.xgmi.connected_to_cpu)
			adev->umc.ras = &umc_v6_7_ras;
		/* odd die ids use the "first" channel index table */
		if (1 & adev->smuio.funcs->get_die_id(adev))
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0];
		else
			adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_second[0][0];
		break;
	default:
		/* other UMC versions have no RAS support wired here */
		break;
	}
}
14975b6b35aaSHawking Zhang 
/* Pick the MMHUB implementation matching the MMHUB IP version. */
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 1):
		adev->mmhub.funcs = &mmhub_v9_4_funcs;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.funcs = &mmhub_v1_7_funcs;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.funcs = &mmhub_v1_8_funcs;
		break;
	default:
		/* all other GMC v9 parts use the original MMHUB 1.0 */
		adev->mmhub.funcs = &mmhub_v1_0_funcs;
		break;
	}
}
15153d093da0STao Zhou 
/* Pick the MMHUB RAS implementation matching the MMHUB IP version.
 * Note the mapping differs from gmc_v9_0_set_mmhub_funcs(): MMHUB 9.4.0
 * has RAS support but uses the default (1.0) function table.
 */
static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 4, 0):
		adev->mmhub.ras = &mmhub_v1_0_ras;
		break;
	case IP_VERSION(9, 4, 1):
		adev->mmhub.ras = &mmhub_v9_4_ras;
		break;
	case IP_VERSION(9, 4, 2):
		adev->mmhub.ras = &mmhub_v1_7_ras;
		break;
	case IP_VERSION(1, 8, 0):
		adev->mmhub.ras = &mmhub_v1_8_ras;
		break;
	default:
		/* mmhub ras is not available */
		break;
	}
}
1536d844c6d7SHawking Zhang 
gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device * adev)15378ffff9b4SOak Zeng static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
15388ffff9b4SOak Zeng {
1539018f7300SLe Ma 	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
1540018f7300SLe Ma 		adev->gfxhub.funcs = &gfxhub_v1_2_funcs;
1541018f7300SLe Ma 	else
15428ffff9b4SOak Zeng 		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
15438ffff9b4SOak Zeng }
15448ffff9b4SOak Zeng 
/* All GMC v9 parts use the HDP v4 RAS implementation. */
static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
{
	adev->hdp.ras = &hdp_v4_0_ras;
}
15496f12507fSHawking Zhang 
gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device * adev)15507f544c54SHawking Zhang static void gmc_v9_0_set_mca_ras_funcs(struct amdgpu_device *adev)
15513907c492SJohn Clements {
15527f544c54SHawking Zhang 	struct amdgpu_mca *mca = &adev->mca;
15537f544c54SHawking Zhang 
1554630e959fSAlex Deucher 	/* is UMC the right IP to check for MCA?  Maybe DF? */
1555630e959fSAlex Deucher 	switch (adev->ip_versions[UMC_HWIP][0]) {
1556630e959fSAlex Deucher 	case IP_VERSION(6, 7, 0):
15577f544c54SHawking Zhang 		if (!adev->gmc.xgmi.connected_to_cpu) {
15587f544c54SHawking Zhang 			mca->mp0.ras = &mca_v3_0_mp0_ras;
15597f544c54SHawking Zhang 			mca->mp1.ras = &mca_v3_0_mp1_ras;
15607f544c54SHawking Zhang 			mca->mpio.ras = &mca_v3_0_mpio_ras;
15617f544c54SHawking Zhang 		}
15623907c492SJohn Clements 		break;
15633907c492SJohn Clements 	default:
15643907c492SJohn Clements 		break;
15653907c492SJohn Clements 	}
15663907c492SJohn Clements }
15673907c492SJohn Clements 
/* The driver only handles XGMI RAS when the GPU is not connected to the
 * CPU over XGMI.
 */
static void gmc_v9_0_set_xgmi_ras_funcs(struct amdgpu_device *adev)
{
	if (!adev->gmc.xgmi.connected_to_cpu)
		adev->gmc.xgmi.ras = &xgmi_ras;
}
1573da9d669eSHawking Zhang 
/* Early init: detect XGMI support and the GC 9.4.3 APU mode, wire up all
 * GMC v9 sub-block function tables, and set the fixed shared/private
 * aperture ranges.
 */
static int gmc_v9_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * 9.4.0, 9.4.1 and 9.4.3 don't have XGMI defined
	 * in their IP discovery tables
	 */
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
	    adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		adev->gmc.xgmi.supported = true;

	if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
		adev->gmc.xgmi.supported = true;
		/* XGMI 6.1.0 may also link to the host CPU; ask SMUIO */
		adev->gmc.xgmi.connected_to_cpu =
			adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
	}

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
		enum amdgpu_pkg_type pkg_type =
			adev->smuio.funcs->get_pkg_type(adev);
		/* On GFXIP 9.4.3. APU, there is no physical VRAM domain present
		 * and the APU, can be in used two possible modes:
		 *  - carveout mode
		 *  - native APU mode
		 * "is_app_apu" can be used to identify the APU in the native
		 * mode.
		 */
		adev->gmc.is_app_apu = (pkg_type == AMDGPU_PKG_TYPE_APU &&
					!pci_resource_len(adev->pdev, 0));
	}

	/* install all sub-block function tables; must happen before the
	 * later init phases use them
	 */
	gmc_v9_0_set_gmc_funcs(adev);
	gmc_v9_0_set_irq_funcs(adev);
	gmc_v9_0_set_umc_funcs(adev);
	gmc_v9_0_set_mmhub_funcs(adev);
	gmc_v9_0_set_mmhub_ras_funcs(adev);
	gmc_v9_0_set_gfxhub_funcs(adev);
	gmc_v9_0_set_hdp_ras_funcs(adev);
	gmc_v9_0_set_mca_ras_funcs(adev);
	gmc_v9_0_set_xgmi_ras_funcs(adev);

	/* fixed 4 GB shared and private apertures */
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}
1627e60f8db5SAlex Xie 
/* Late init: allocate VM invalidation engines, apply the vega10 HBM ECC /
 * partial-write workaround, reset MMHUB/HDP RAS error counts where
 * persistent EDC harvesting is unsupported, finish RAS setup, and enable
 * the VM fault interrupt.
 */
static int gmc_v9_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	/*
	 * Workaround performance drop issue with VBIOS enables partial
	 * writes, while disables HBM ECC for vega10.
	 */
	if (!amdgpu_sriov_vf(adev) &&
	    (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
		if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
			if (adev->df.funcs &&
			    adev->df.funcs->enable_ecc_force_par_wr_rmw)
				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
		}
	}

	if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
		/* reset MMHUB and HDP RAS error counts via their hw_ops */
		if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops &&
		    adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev);

		if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops &&
		    adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count)
			adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev);
	}

	r = amdgpu_gmc_ras_late_init(adev);
	if (r)
		return r;

	/* finally enable the VM fault interrupt */
	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
1666e60f8db5SAlex Xie 
/* Place VRAM, GART and the AGP aperture in the GPU's physical address
 * space and compute the VRAM base offset used by the VM manager.
 */
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = adev->mmhub.funcs->get_fb_location(adev);

	/* add the xgmi offset of the physical node */
	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
	if (adev->gmc.xgmi.connected_to_cpu) {
		/* GPU linked to the CPU via XGMI: use the system VM layout */
		amdgpu_gmc_sysvm_location(adev, mc);
	} else {
		amdgpu_gmc_vram_location(adev, mc, base);
		amdgpu_gmc_gart_location(adev, mc);
		amdgpu_gmc_agp_location(adev, mc);
	}
	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);

	/* XXX: add the xgmi offset of the physical node? */
	adev->vm_manager.vram_base_offset +=
		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
}
1688e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success, negative error code on failure to resize the
 * FB BAR.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* size in MB on si */
	if (!adev->gmc.is_app_apu) {
		adev->gmc.mc_vram_size =
			adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	} else {
		/* native-mode GC 9.4.3 APUs expose no VRAM domain */
		DRM_DEBUG("Set mc_vram_size = 0 for APP APU\n");
		adev->gmc.mc_vram_size = 0;
	}
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* resize the FB BAR only on dGPUs not linked to the CPU via XGMI */
	if (!(adev->flags & AMD_IS_APU) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/*
	 * AMD Accelerated Processing Platform (APP) supporting GPU-HOST xgmi
	 * interface can use VRAM through here as it appears system reserved
	 * memory in host address space.
	 *
	 * For APUs, VRAM is just the stolen system memory and can be accessed
	 * directly.
	 *
	 * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
	 */

	/* check whether both host-gpu and gpu-gpu xgmi links exist */
	if ((!amdgpu_sriov_vf(adev) &&
		(adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) ||
	    (adev->gmc.xgmi.supported &&
	     adev->gmc.xgmi.connected_to_cpu)) {
		adev->gmc.aper_base =
			adev->gfxhub.funcs->get_mc_fb_offset(adev) +
			adev->gmc.xgmi.physical_node_id *
			adev->gmc.xgmi.node_segment_size;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}

#endif
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		case IP_VERSION(9, 0, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 2, 1):  /* all engines support GPUVM */
		case IP_VERSION(9, 4, 0):
		case IP_VERSION(9, 4, 1):
		case IP_VERSION(9, 4, 2):
		case IP_VERSION(9, 4, 3):
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case IP_VERSION(9, 1, 0):   /* DCE SG support */
		case IP_VERSION(9, 2, 2):   /* DCE SG support */
		case IP_VERSION(9, 3, 0):
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		/* gart size overridden by module parameter (in MB) */
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
1776e60f8db5SAlex Xie 
/*
 * gmc_v9_0_gart_init - allocate and configure the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Chooses the VMID0 (system domain) page table layout, allocates the GART
 * table either in VRAM or, for devices without dedicated VRAM, in system
 * memory, and allocates PDB0 when VRAM is CPU-coherent over XGMI.
 * Returns 0 on success or a negative error code.
 */
static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	/* GART may already exist (e.g. re-init); keep the existing table. */
	if (adev->gart.bo) {
		WARN(1, "VEGA10 PCIE GART already initialized\n");
		return 0;
	}

	/*
	 * With a CPU-coherent XGMI link, VMID0 uses a depth-1 table rooted
	 * in PDB0 with block size 12; otherwise a flat depth-0 table.
	 */
	if (adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.vmid0_page_table_depth = 1;
		adev->gmc.vmid0_page_table_block_size = 12;
	} else {
		adev->gmc.vmid0_page_table_depth = 0;
		adev->gmc.vmid0_page_table_block_size = 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8; /* 8 bytes per PTE */
	/* Default PTE flags: uncached memory type, executable */
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
				 AMDGPU_PTE_EXECUTABLE;

	if (!adev->gmc.real_vram_size) {
		/* No dedicated VRAM: the GART table must live in system RAM. */
		dev_info(adev->dev, "Put GART in system memory for APU\n");
		r = amdgpu_gart_table_ram_alloc(adev);
		if (r)
			dev_err(adev->dev, "Failed to allocate GART in system memory\n");
	} else {
		r = amdgpu_gart_table_vram_alloc(adev);
		if (r)
			return r;

		/* PDB0 is only needed for the CPU-coherent XGMI layout above. */
		if (adev->gmc.xgmi.connected_to_cpu)
			r = amdgpu_gmc_pdb0_alloc(adev);
	}

	return r;
}
1818e60f8db5SAlex Xie 
1819b0a2db9bSAlex Deucher /**
1820b0a2db9bSAlex Deucher  * gmc_v9_0_save_registers - saves regs
1821b0a2db9bSAlex Deucher  *
1822b0a2db9bSAlex Deucher  * @adev: amdgpu_device pointer
1823b0a2db9bSAlex Deucher  *
1824b0a2db9bSAlex Deucher  * This saves potential register values that should be
1825b0a2db9bSAlex Deucher  * restored upon resume
1826b0a2db9bSAlex Deucher  */
gmc_v9_0_save_registers(struct amdgpu_device * adev)1827b0a2db9bSAlex Deucher static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1828ebdef28eSAlex Deucher {
1829630e959fSAlex Deucher 	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
1830630e959fSAlex Deucher 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
1831b0a2db9bSAlex Deucher 		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1832ebdef28eSAlex Deucher }
1833ebdef28eSAlex Deucher 
gmc_v9_0_validate_partition_info(struct amdgpu_device * adev)1834a433f1f5SLijo Lazar static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev)
1835a433f1f5SLijo Lazar {
1836a433f1f5SLijo Lazar 	enum amdgpu_memory_partition mode;
1837a433f1f5SLijo Lazar 	u32 supp_modes;
1838a433f1f5SLijo Lazar 	bool valid;
1839a433f1f5SLijo Lazar 
1840a433f1f5SLijo Lazar 	mode = gmc_v9_0_get_memory_partition(adev, &supp_modes);
1841a433f1f5SLijo Lazar 
1842a433f1f5SLijo Lazar 	/* Mode detected by hardware not present in supported modes */
1843a433f1f5SLijo Lazar 	if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
1844a433f1f5SLijo Lazar 	    !(BIT(mode - 1) & supp_modes))
1845a433f1f5SLijo Lazar 		return false;
1846a433f1f5SLijo Lazar 
1847a433f1f5SLijo Lazar 	switch (mode) {
1848a433f1f5SLijo Lazar 	case UNKNOWN_MEMORY_PARTITION_MODE:
1849a433f1f5SLijo Lazar 	case AMDGPU_NPS1_PARTITION_MODE:
1850a433f1f5SLijo Lazar 		valid = (adev->gmc.num_mem_partitions == 1);
1851a433f1f5SLijo Lazar 		break;
1852a433f1f5SLijo Lazar 	case AMDGPU_NPS2_PARTITION_MODE:
1853a433f1f5SLijo Lazar 		valid = (adev->gmc.num_mem_partitions == 2);
1854a433f1f5SLijo Lazar 		break;
1855a433f1f5SLijo Lazar 	case AMDGPU_NPS4_PARTITION_MODE:
1856a433f1f5SLijo Lazar 		valid = (adev->gmc.num_mem_partitions == 3 ||
1857a433f1f5SLijo Lazar 			 adev->gmc.num_mem_partitions == 4);
1858a433f1f5SLijo Lazar 		break;
1859a433f1f5SLijo Lazar 	default:
1860a433f1f5SLijo Lazar 		valid = false;
1861a433f1f5SLijo Lazar 	}
1862a433f1f5SLijo Lazar 
1863a433f1f5SLijo Lazar 	return valid;
1864a433f1f5SLijo Lazar }
1865a433f1f5SLijo Lazar 
/* Return true iff 'nid' occurs in the first 'num_ids' entries of 'node_ids'. */
static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid)
{
	const int *cur = node_ids;
	const int *end = node_ids + num_ids;

	while (cur < end) {
		if (*cur == nid)
			return true;
		++cur;
	}

	return false;
}
1877a433f1f5SLijo Lazar 
/*
 * gmc_v9_0_init_acpi_mem_ranges - build memory partitions from ACPI NUMA info
 *
 * @adev: amdgpu_device pointer
 * @mem_ranges: output array (at least MAX_MEM_RANGES entries)
 *
 * Queries the ACPI memory/NUMA information for each XCC instance and creates
 * one partition per distinct NUMA node.  If any XCC reports NUMA_NO_NODE,
 * a single range covering the reported size is used instead.
 */
static void
gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev,
			      struct amdgpu_mem_partition_info *mem_ranges)
{
	int num_ranges = 0, ret, mem_groups;
	struct amdgpu_numa_info numa_info;
	int node_ids[MAX_MEM_RANGES];
	int num_xcc, xcc_id;
	uint32_t xcc_mask;

	/* Iterate over all XCCs; mem_groups is the number of AIDs present. */
	num_xcc = NUM_XCC(adev->gfx.xcc_mask);
	xcc_mask = (1U << num_xcc) - 1;
	mem_groups = hweight32(adev->aid_mask);

	for_each_inst(xcc_id, xcc_mask) {
		ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
		if (ret)
			continue;

		/* No NUMA topology: collapse everything into one range. */
		if (numa_info.nid == NUMA_NO_NODE) {
			mem_ranges[0].size = numa_info.size;
			mem_ranges[0].numa.node = numa_info.nid;
			num_ranges = 1;
			break;
		}

		/* Several XCCs may share a node; record each node only once. */
		if (gmc_v9_0_is_node_present(node_ids, num_ranges,
					     numa_info.nid))
			continue;

		node_ids[num_ranges] = numa_info.nid;
		mem_ranges[num_ranges].numa.node = numa_info.nid;
		mem_ranges[num_ranges].size = numa_info.size;
		++num_ranges;
	}

	adev->gmc.num_mem_partitions = num_ranges;

	/*
	 * If there is only one partition, don't use the entire reported size:
	 * scale it by (mem_groups - 1) / mem_groups — presumably to leave one
	 * group's worth of memory unclaimed; TODO confirm rationale.
	 */
	if (adev->gmc.num_mem_partitions == 1) {
		mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1);
		do_div(mem_ranges[0].size, mem_groups);
	}
}
1922a433f1f5SLijo Lazar 
1923a433f1f5SLijo Lazar static void
gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device * adev,struct amdgpu_mem_partition_info * mem_ranges)1924a433f1f5SLijo Lazar gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev,
1925a433f1f5SLijo Lazar 			    struct amdgpu_mem_partition_info *mem_ranges)
1926a433f1f5SLijo Lazar {
1927a433f1f5SLijo Lazar 	enum amdgpu_memory_partition mode;
1928a433f1f5SLijo Lazar 	u32 start_addr = 0, size;
1929a433f1f5SLijo Lazar 	int i;
1930a433f1f5SLijo Lazar 
1931a433f1f5SLijo Lazar 	mode = gmc_v9_0_query_memory_partition(adev);
1932a433f1f5SLijo Lazar 
1933a433f1f5SLijo Lazar 	switch (mode) {
1934a433f1f5SLijo Lazar 	case UNKNOWN_MEMORY_PARTITION_MODE:
1935a433f1f5SLijo Lazar 	case AMDGPU_NPS1_PARTITION_MODE:
1936a433f1f5SLijo Lazar 		adev->gmc.num_mem_partitions = 1;
1937a433f1f5SLijo Lazar 		break;
1938a433f1f5SLijo Lazar 	case AMDGPU_NPS2_PARTITION_MODE:
1939a433f1f5SLijo Lazar 		adev->gmc.num_mem_partitions = 2;
1940a433f1f5SLijo Lazar 		break;
1941a433f1f5SLijo Lazar 	case AMDGPU_NPS4_PARTITION_MODE:
1942a433f1f5SLijo Lazar 		if (adev->flags & AMD_IS_APU)
1943a433f1f5SLijo Lazar 			adev->gmc.num_mem_partitions = 3;
1944a433f1f5SLijo Lazar 		else
1945a433f1f5SLijo Lazar 			adev->gmc.num_mem_partitions = 4;
1946a433f1f5SLijo Lazar 		break;
1947a433f1f5SLijo Lazar 	default:
1948a433f1f5SLijo Lazar 		adev->gmc.num_mem_partitions = 1;
1949a433f1f5SLijo Lazar 		break;
1950a433f1f5SLijo Lazar 	}
1951a433f1f5SLijo Lazar 
195245b3a914SAlex Deucher 	size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT;
195345b3a914SAlex Deucher 	size /= adev->gmc.num_mem_partitions;
1954a433f1f5SLijo Lazar 
1955a433f1f5SLijo Lazar 	for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
1956a433f1f5SLijo Lazar 		mem_ranges[i].range.fpfn = start_addr;
1957a433f1f5SLijo Lazar 		mem_ranges[i].size = ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
1958a433f1f5SLijo Lazar 		mem_ranges[i].range.lpfn = start_addr + size - 1;
1959a433f1f5SLijo Lazar 		start_addr += size;
1960a433f1f5SLijo Lazar 	}
1961a433f1f5SLijo Lazar 
1962a433f1f5SLijo Lazar 	/* Adjust the last one */
1963a433f1f5SLijo Lazar 	mem_ranges[adev->gmc.num_mem_partitions - 1].range.lpfn =
1964a433f1f5SLijo Lazar 		(adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
1965a433f1f5SLijo Lazar 	mem_ranges[adev->gmc.num_mem_partitions - 1].size =
1966a433f1f5SLijo Lazar 		adev->gmc.real_vram_size -
1967a433f1f5SLijo Lazar 		((u64)mem_ranges[adev->gmc.num_mem_partitions - 1].range.fpfn
1968a433f1f5SLijo Lazar 		 << AMDGPU_GPU_PAGE_SHIFT);
1969a433f1f5SLijo Lazar }
1970a433f1f5SLijo Lazar 
gmc_v9_0_init_mem_ranges(struct amdgpu_device * adev)1971a433f1f5SLijo Lazar static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev)
1972a433f1f5SLijo Lazar {
1973a433f1f5SLijo Lazar 	bool valid;
1974a433f1f5SLijo Lazar 
1975a433f1f5SLijo Lazar 	adev->gmc.mem_partitions = kzalloc(
1976a433f1f5SLijo Lazar 		MAX_MEM_RANGES * sizeof(struct amdgpu_mem_partition_info),
1977a433f1f5SLijo Lazar 		GFP_KERNEL);
1978a433f1f5SLijo Lazar 
1979a433f1f5SLijo Lazar 	if (!adev->gmc.mem_partitions)
1980a433f1f5SLijo Lazar 		return -ENOMEM;
1981a433f1f5SLijo Lazar 
1982a433f1f5SLijo Lazar 	/* TODO : Get the range from PSP/Discovery for dGPU */
1983a433f1f5SLijo Lazar 	if (adev->gmc.is_app_apu)
1984a433f1f5SLijo Lazar 		gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
1985a433f1f5SLijo Lazar 	else
1986a433f1f5SLijo Lazar 		gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
1987a433f1f5SLijo Lazar 
198846f7b4deSGavin Wan 	if (amdgpu_sriov_vf(adev))
198946f7b4deSGavin Wan 		valid = true;
199046f7b4deSGavin Wan 	else
1991a433f1f5SLijo Lazar 		valid = gmc_v9_0_validate_partition_info(adev);
1992a433f1f5SLijo Lazar 	if (!valid) {
1993a433f1f5SLijo Lazar 		/* TODO: handle invalid case */
1994a433f1f5SLijo Lazar 		dev_WARN(adev->dev,
1995a433f1f5SLijo Lazar 			 "Mem ranges not matching with hardware config");
1996a433f1f5SLijo Lazar 	}
1997a433f1f5SLijo Lazar 
1998a433f1f5SLijo Lazar 	return 0;
1999a433f1f5SLijo Lazar }
2000a433f1f5SLijo Lazar 
gmc_v9_4_3_init_vram_info(struct amdgpu_device * adev)2001e20ff051SLijo Lazar static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
2002e20ff051SLijo Lazar {
2003e20ff051SLijo Lazar 	static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
2004e20ff051SLijo Lazar 	u32 vram_info;
2005e20ff051SLijo Lazar 
2006e20ff051SLijo Lazar 	if (!amdgpu_sriov_vf(adev)) {
2007e20ff051SLijo Lazar 		vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
2008e20ff051SLijo Lazar 		adev->gmc.vram_vendor = vram_info & 0xF;
2009e20ff051SLijo Lazar 	}
2010e20ff051SLijo Lazar 	adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
2011e20ff051SLijo Lazar 	adev->gmc.vram_width = 128 * 64;
2012e20ff051SLijo Lazar }
2013e20ff051SLijo Lazar 
/*
 * gmc_v9_0_sw_init - software init for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Determines the VRAM configuration, sets up VM/hub parameters per GC IP
 * version, registers the page-fault and ECC interrupt sources, configures
 * the DMA mask, and initializes the memory manager and GART.
 * Returns 0 on success or a negative error code.
 */
static int gmc_v9_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0, dma_addr_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned long inst_mask = adev->aid_mask;

	adev->gfxhub.funcs->init(adev);

	adev->mmhub.funcs->init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	/* Three ways to determine VRAM config: GC 9.4.3 helper, no-BIOS
	 * fallbacks, or the atomfirmware tables.
	 */
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
		gmc_v9_4_3_init_vram_info(adev);
	} else if (!adev->bios) {
		if (adev->flags & AMD_IS_APU) {
			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_DDR4;
			adev->gmc.vram_width = 64 * 64;
		} else {
			adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM;
			adev->gmc.vram_width = 128 * 64;
		}
	} else {
		r = amdgpu_atomfirmware_get_vram_info(adev,
			&vram_width, &vram_type, &vram_vendor);
		if (amdgpu_sriov_vf(adev))
			/* For Vega10 SR-IOV, vram_width can't be read from
			 * ATOM as on RAVEN, and the DF-related registers are
			 * not readable; hard-coding seems to be the only way
			 * to set the correct vram_width.
			 */
			adev->gmc.vram_width = 2048;
		else if (amdgpu_emu_mode != 1)
			adev->gmc.vram_width = vram_width;

		if (!adev->gmc.vram_width) {
			int chansize, numchan;

			/* hbm memory channel size (bits) */
			if (adev->flags & AMD_IS_APU)
				chansize = 64;
			else
				chansize = 128;
			if (adev->df.funcs &&
			    adev->df.funcs->get_hbm_channel_number) {
				numchan = adev->df.funcs->get_hbm_channel_number(adev);
				adev->gmc.vram_width = numchan * chansize;
			}
		}

		adev->gmc.vram_type = vram_type;
		adev->gmc.vram_vendor = vram_vendor;
	}
	/* Per GC IP version: mark the active VM hubs and size the VM space. */
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 2):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		} else {
			/* vm_size is 128TB + 512GB for legacy 3-level page support */
			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
			adev->gmc.translate_further =
				adev->vm_manager.num_level > 1;
		}
		break;
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 2):
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);

		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Vega10,
		 * block size 512 (9bit)
		 */
		/* sriov restricts max_pfn below AMDGPU_GMC_HOLE */
		if (amdgpu_sriov_vf(adev))
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 1):
		/* 9.4.1 has a second MMHUB instance. */
		set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB0(0), adev->vmhubs_mask);
		set_bit(AMDGPU_MMHUB1(0), adev->vmhubs_mask);

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 3):
		/* One GFX hub per XCC, plus one MMHUB0 instance per AID. */
		bitmap_set(adev->vmhubs_mask, AMDGPU_GFXHUB(0),
				  NUM_XCC(adev->gfx.xcc_mask));

		inst_mask <<= AMDGPU_MMHUB0(0);
		bitmap_or(adev->vmhubs_mask, adev->vmhubs_mask, &inst_mask, 32);

		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault.*/
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
				&adev->gmc.vm_fault);
	if (r)
		return r;

	/* 9.4.1 routes VM faults through a second VMC client as well. */
	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
					&adev->gmc.vm_fault);
		if (r)
			return r;
	}

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
				&adev->gmc.vm_fault);

	if (r)
		return r;

	if (!amdgpu_sriov_vf(adev) &&
	    !adev->gmc.xgmi.connected_to_cpu) {
		/* interrupt sent to DF. */
		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
				      &adev->gmc.ecc_irq);
		if (r)
			return r;
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */

	/* GC 9.4.2 and newer support 48-bit DMA addressing; older parts 44. */
	dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
	if (r) {
		dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(dma_addr_bits);

	r = gmc_v9_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) {
		r = gmc_v9_0_init_mem_ranges(adev);
		if (r)
			return r;
	}

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v9_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1..n-1
	 * amdkfd will use VMIDs n..15
	 *
	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
	 * for video processing.
	 */
	adev->vm_manager.first_kfd_vmid =
		(adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
		 adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) ? 3 : 8;

	amdgpu_vm_manager_init(adev);

	/* Save registers that must be restored on resume (see restore path). */
	gmc_v9_0_save_registers(adev);

	r = amdgpu_gmc_ras_sw_init(adev);
	if (r)
		return r;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		amdgpu_gmc_sysfs_init(adev);

	return 0;
}
2216e60f8db5SAlex Xie 
/*
 * gmc_v9_0_sw_fini - software teardown for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Releases everything set up in gmc_v9_0_sw_init(), roughly in reverse
 * order: sysfs, RAS, VM manager, GART table, PDB0 and the BO subsystem.
 * Always returns 0.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3))
		amdgpu_gmc_sysfs_fini(adev);

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	/* Free the GART table from wherever gart_init placed it. */
	if (!adev->gmc.real_vram_size) {
		dev_info(adev->dev, "Put GART in system memory for APU free\n");
		amdgpu_gart_table_ram_free(adev);
	} else {
		amdgpu_gart_table_vram_free(adev);
	}
	/* Safe even if PDB0 was never allocated (pointer is NULL then). */
	amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
	amdgpu_bo_fini(adev);

	adev->gmc.num_mem_partitions = 0;
	kfree(adev->gmc.mem_partitions);

	return 0;
}
2241e60f8db5SAlex Xie 
/*
 * gmc_v9_0_init_golden_registers - program "golden" register defaults
 *
 * @adev: amdgpu_device pointer
 *
 * Applies the MMHUB/ATHUB golden register sequences appropriate for the
 * MMHUB IP version.  Under SR-IOV on MMHUB 9.0.0 the host owns these
 * registers, so programming is skipped there.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->ip_versions[MMHUB_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		if (amdgpu_sriov_vf(adev))
			break;
		/* bare metal 9.0.0 uses the same sequences as 9.4.0 */
		fallthrough;
	case IP_VERSION(9, 4, 0):
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 0):
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
2269e60f8db5SAlex Xie 
2270e60f8db5SAlex Xie /**
2271c2ecd79bSShirish S  * gmc_v9_0_restore_registers - restores regs
2272c2ecd79bSShirish S  *
2273c2ecd79bSShirish S  * @adev: amdgpu_device pointer
2274c2ecd79bSShirish S  *
2275c2ecd79bSShirish S  * This restores register values, saved at suspend.
2276c2ecd79bSShirish S  */
gmc_v9_0_restore_registers(struct amdgpu_device * adev)2277b0a2db9bSAlex Deucher void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
2278c2ecd79bSShirish S {
2279630e959fSAlex Deucher 	if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
2280630e959fSAlex Deucher 	    (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
2281f8646661SAlex Deucher 		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
22820eaa8012SShirish S 		WARN_ON(adev->gmc.sdpif_register !=
22830eaa8012SShirish S 			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
22840eaa8012SShirish S 	}
2285c2ecd79bSShirish S }
2286c2ecd79bSShirish S 
2287c2ecd79bSShirish S /**
2288e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
2289e60f8db5SAlex Xie  *
2290e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
2291e60f8db5SAlex Xie  */
static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
	int r;

	/* With CPU-coherent XGMI, the PDB0 root table is set up first. */
	if (adev->gmc.xgmi.connected_to_cpu)
		amdgpu_gmc_init_pdb0(adev);

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	/* Repopulate GTT mappings into the page table before enabling hubs. */
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

	/* In S0ix the GFX hub keeps its state; skip reprogramming it. */
	if (!adev->in_s0ix) {
		r = adev->gfxhub.funcs->gart_enable(adev);
		if (r)
			return r;
	}

	r = adev->mmhub.funcs->gart_enable(adev);
	if (r)
		return r;

	DRM_INFO("PCIE GART of %uM enabled.\n",
		 (unsigned int)(adev->gmc.gart_size >> 20));
	/* pdb0_bo exists only on the CPU-coherent XGMI path above. */
	if (adev->gmc.pdb0_bo)
		DRM_INFO("PDB0 located at 0x%016llX\n",
				(unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
	DRM_INFO("PTB located at 0x%016llX\n",
			(unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	return 0;
}
2326cb1545f7SOak Zeng 
/*
 * gmc_v9_0_hw_init - hardware init for the GMC v9 IP block
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Programs golden registers, locks out legacy VGA access, initializes and
 * flushes HDP, configures the fault behavior of the hubs, flushes all VM
 * hub TLBs, and finally enables the GART.
 * Returns 0 on success or a negative error code.
 */
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int i, r;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture*/
		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		/* disable VGA render */
		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
	}

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	adev->hdp.funcs->init_registers(adev);

	/* After HDP is initialized, flush HDP.*/
	adev->hdp.funcs->flush_hdp(adev, NULL);

	/*
	 * value is passed to the hubs' set_fault_enable_default; false only
	 * when faults should always stop (AMDGPU_VM_FAULT_STOP_ALWAYS) —
	 * exact per-hub semantics live in the hub implementations.
	 */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	/* Under SR-IOV the host configures fault handling; don't touch it. */
	if (!amdgpu_sriov_vf(adev)) {
		if (!adev->in_s0ix)
			adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	/* Flush the TLB of every active VM hub (GFX hub skipped in S0ix). */
	for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
		if (adev->in_s0ix && (i == AMDGPU_GFXHUB(0)))
			continue;
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
	}

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);
	if (r)
		return r;

	/* On emulation, additionally sanity-check VRAM accessibility. */
	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}
2379e60f8db5SAlex Xie 
2380e60f8db5SAlex Xie /**
2381e60f8db5SAlex Xie  * gmc_v9_0_gart_disable - gart disable
2382e60f8db5SAlex Xie  *
2383e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
2384e60f8db5SAlex Xie  *
2385e60f8db5SAlex Xie  * This disables all VM page table.
2386e60f8db5SAlex Xie  */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	/* Skip the GFX hub during S0ix, mirroring the same skip on the
	 * enable path in gmc_v9_0_hw_init — presumably GFX hub state is
	 * retained across S0ix; confirm against the S0ix suspend flow.
	 */
	if (!adev->in_s0ix)
		adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
}
2393e60f8db5SAlex Xie 
/**
 * gmc_v9_0_hw_fini - tear down the GMC hardware state
 *
 * @handle: amdgpu_device pointer (opaque IP-block handle)
 *
 * Disables the GART, un-gates what gmc_v9_0_hw_init gated, and drops the
 * interrupt references taken during init.  Always returns 0.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GART teardown happens even under SR-IOV, so it must stay
	 * ahead of the early return below.
	 */
	gmc_v9_0_gart_disable(adev);

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	/*
	 * Pair the operations did in gmc_v9_0_hw_init and thus maintain
	 * a correct cached state for GMC. Otherwise, the "gate" again
	 * operation on S3 resuming will fail due to wrong cached state.
	 */
	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, false);

	/* Release the VM-fault IRQ reference taken at init time. */
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);

	/* Only drop the ECC IRQ reference if it was actually set up,
	 * i.e. the IRQ has funcs and UMC RAS is supported.
	 */
	if (adev->gmc.ecc_irq.funcs &&
		amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
		amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);

	return 0;
}
2422e60f8db5SAlex Xie 
/* Suspending the GMC is simply a full hardware teardown. */
static int gmc_v9_0_suspend(void *handle)
{
	return gmc_v9_0_hw_fini(handle);
}
2429e60f8db5SAlex Xie 
/* Resume: re-program the GMC, then invalidate every cached VMID so no
 * stale translations survive the suspend/resume cycle.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = handle;
	int r;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}
2443e60f8db5SAlex Xie 
/* IP-block idle query: the memory controller has no busy state to poll. */
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}
2449e60f8db5SAlex Xie 
/* IP-block idle wait: nothing to wait on, see gmc_v9_0_is_idle. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}
2455e60f8db5SAlex Xie 
/* IP-block soft reset: not implemented for GMC v9; reports success. */
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
2461e60f8db5SAlex Xie 
gmc_v9_0_set_clockgating_state(void * handle,enum amd_clockgating_state state)2462e60f8db5SAlex Xie static int gmc_v9_0_set_clockgating_state(void *handle,
2463e60f8db5SAlex Xie 					enum amd_clockgating_state state)
2464e60f8db5SAlex Xie {
2465d5583d4fSHuang Rui 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2466d5583d4fSHuang Rui 
24679fb1506eSOak Zeng 	adev->mmhub.funcs->set_clockgating(adev, state);
2468bee7b51aSLe Ma 
2469bee7b51aSLe Ma 	athub_v1_0_set_clockgating(adev, state);
2470bee7b51aSLe Ma 
2471bee7b51aSLe Ma 	return 0;
2472e60f8db5SAlex Xie }
2473e60f8db5SAlex Xie 
/* Gather the current clockgating flags from both MMHUB and ATHUB. */
static void gmc_v9_0_get_clockgating_state(void *handle, u64 *flags)
{
	struct amdgpu_device *adev = handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);
	athub_v1_0_get_clockgating(adev, flags);
}
248213052be5SHuang Rui 
/* No powergating control at the GMC level; accept the request as a no-op. */
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
2488e60f8db5SAlex Xie 
2489e60f8db5SAlex Xie const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
2490e60f8db5SAlex Xie 	.name = "gmc_v9_0",
2491e60f8db5SAlex Xie 	.early_init = gmc_v9_0_early_init,
2492e60f8db5SAlex Xie 	.late_init = gmc_v9_0_late_init,
2493e60f8db5SAlex Xie 	.sw_init = gmc_v9_0_sw_init,
2494e60f8db5SAlex Xie 	.sw_fini = gmc_v9_0_sw_fini,
2495e60f8db5SAlex Xie 	.hw_init = gmc_v9_0_hw_init,
2496e60f8db5SAlex Xie 	.hw_fini = gmc_v9_0_hw_fini,
2497e60f8db5SAlex Xie 	.suspend = gmc_v9_0_suspend,
2498e60f8db5SAlex Xie 	.resume = gmc_v9_0_resume,
2499e60f8db5SAlex Xie 	.is_idle = gmc_v9_0_is_idle,
2500e60f8db5SAlex Xie 	.wait_for_idle = gmc_v9_0_wait_for_idle,
2501e60f8db5SAlex Xie 	.soft_reset = gmc_v9_0_soft_reset,
2502e60f8db5SAlex Xie 	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
2503e60f8db5SAlex Xie 	.set_powergating_state = gmc_v9_0_set_powergating_state,
250413052be5SHuang Rui 	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
2505e60f8db5SAlex Xie };
2506e60f8db5SAlex Xie 
250708e85215SSrinivasan Shanmugam const struct amdgpu_ip_block_version gmc_v9_0_ip_block = {
2508e60f8db5SAlex Xie 	.type = AMD_IP_BLOCK_TYPE_GMC,
2509e60f8db5SAlex Xie 	.major = 9,
2510e60f8db5SAlex Xie 	.minor = 0,
2511e60f8db5SAlex Xie 	.rev = 0,
2512e60f8db5SAlex Xie 	.funcs = &gmc_v9_0_ip_funcs,
2513e60f8db5SAlex Xie };
2514