xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c (revision 4a20300b)
1e60f8db5SAlex Xie /*
2e60f8db5SAlex Xie  * Copyright 2016 Advanced Micro Devices, Inc.
3e60f8db5SAlex Xie  *
4e60f8db5SAlex Xie  * Permission is hereby granted, free of charge, to any person obtaining a
5e60f8db5SAlex Xie  * copy of this software and associated documentation files (the "Software"),
6e60f8db5SAlex Xie  * to deal in the Software without restriction, including without limitation
7e60f8db5SAlex Xie  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8e60f8db5SAlex Xie  * and/or sell copies of the Software, and to permit persons to whom the
9e60f8db5SAlex Xie  * Software is furnished to do so, subject to the following conditions:
10e60f8db5SAlex Xie  *
11e60f8db5SAlex Xie  * The above copyright notice and this permission notice shall be included in
12e60f8db5SAlex Xie  * all copies or substantial portions of the Software.
13e60f8db5SAlex Xie  *
14e60f8db5SAlex Xie  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15e60f8db5SAlex Xie  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16e60f8db5SAlex Xie  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17e60f8db5SAlex Xie  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18e60f8db5SAlex Xie  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19e60f8db5SAlex Xie  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20e60f8db5SAlex Xie  * OTHER DEALINGS IN THE SOFTWARE.
21e60f8db5SAlex Xie  *
22e60f8db5SAlex Xie  */
23f867723bSSam Ravnborg 
24e60f8db5SAlex Xie #include <linux/firmware.h>
25f867723bSSam Ravnborg #include <linux/pci.h>
26f867723bSSam Ravnborg 
27fd5fd480SChunming Zhou #include <drm/drm_cache.h>
28f867723bSSam Ravnborg 
29e60f8db5SAlex Xie #include "amdgpu.h"
30e60f8db5SAlex Xie #include "gmc_v9_0.h"
318d6a5230SAlex Deucher #include "amdgpu_atomfirmware.h"
322cddc50eSHuang Rui #include "amdgpu_gem.h"
33e60f8db5SAlex Xie 
3475199b8cSFeifei Xu #include "hdp/hdp_4_0_offset.h"
3575199b8cSFeifei Xu #include "hdp/hdp_4_0_sh_mask.h"
36cde5c34fSFeifei Xu #include "gc/gc_9_0_sh_mask.h"
37135d4b10SFeifei Xu #include "dce/dce_12_0_offset.h"
38135d4b10SFeifei Xu #include "dce/dce_12_0_sh_mask.h"
39fb960bd2SFeifei Xu #include "vega10_enum.h"
4065417d9fSFeifei Xu #include "mmhub/mmhub_1_0_offset.h"
41ea930000SAlex Sierra #include "athub/athub_1_0_sh_mask.h"
426ce68225SFeifei Xu #include "athub/athub_1_0_offset.h"
43250b4228SChristian König #include "oss/osssys_4_0_offset.h"
44e60f8db5SAlex Xie 
45946a4d5bSShaoyun Liu #include "soc15.h"
46ea930000SAlex Sierra #include "soc15d.h"
47e60f8db5SAlex Xie #include "soc15_common.h"
4890c7a935SFeifei Xu #include "umc/umc_6_0_sh_mask.h"
49e60f8db5SAlex Xie 
50e60f8db5SAlex Xie #include "gfxhub_v1_0.h"
51e60f8db5SAlex Xie #include "mmhub_v1_0.h"
52bee7b51aSLe Ma #include "athub_v1_0.h"
53bf0a60b7SAlex Deucher #include "gfxhub_v1_1.h"
5451cce480SLe Ma #include "mmhub_v9_4.h"
555b6b35aaSHawking Zhang #include "umc_v6_1.h"
56e7da754bSMonk Liu #include "umc_v6_0.h"
57e60f8db5SAlex Xie 
5844a99b65SAndrey Grodzovsky #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
5944a99b65SAndrey Grodzovsky 
60791c4769Sxinhui pan #include "amdgpu_ras.h"
61029fbd43SHawking Zhang #include "amdgpu_xgmi.h"
62791c4769Sxinhui pan 
63ebdef28eSAlex Deucher /* add these here since we already include dce12 headers and these are for DCN */
64ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION                                                          0x055d
65ebdef28eSAlex Deucher #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX                                                 2
66ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT                                        0x0
67ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT                                       0x10
68ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK                                          0x00003FFFL
69ebdef28eSAlex Deucher #define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK                                         0x3FFF0000L
70f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0                                                                  0x049d
71f8646661SAlex Deucher #define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX                                                         2
72f8646661SAlex Deucher 
73ebdef28eSAlex Deucher 
/* GFXHUB UTCL2 client-ID -> name table; indexed directly by the CID field
 * of VM_L2_PROTECTION_FAULT_STATUS when the fault came from the gfx hub.
 */
static const char *gfxhub_client_ids[] = {
	[0]  = "CB",
	[1]  = "DB",
	[2]  = "IA",
	[3]  = "WD",
	[4]  = "CPF",
	[5]  = "CPC",
	[6]  = "CPG",
	[7]  = "RLC",
	[8]  = "TCP",
	[9]  = "SQC (inst)",
	[10] = "SQC (data)",
	[11] = "SQG",
	[12] = "PA",
};
89be99ecbfSAlex Deucher 
/* Raven MMHUB client-ID -> name table, indexed [client_id][rw] where rw is
 * the RW field of VM_L2_PROTECTION_FAULT_STATUS. Entries left out decode
 * as NULL and are reported as "unknown".
 * NOTE(review): rw==0 appears to be the read column — confirm vs. reg spec.
 */
static const char *mmhub_client_ids_raven[][2] = {
	[0][0]  = "MP1",   [0][1]  = "MP1",
	[1][0]  = "MP0",   [1][1]  = "MP0",
	[2][0]  = "VCN",   [2][1]  = "VCN",
	[3][0]  = "VCNU",  [3][1]  = "VCNU",
	[4][0]  = "HDP",   [4][1]  = "HDP",
	[5][0]  = "DCE",   [5][1]  = "XDP",
	[6][1]  = "DBGU0",
	[7][1]  = "DCE",
	[8][1]  = "DCEDWB0",
	[9][1]  = "DCEDWB1",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",   [26][1] = "OSS",
	[27][0] = "SDMA0", [27][1] = "SDMA0",
};
11402f23f5fSAlex Deucher 
/* Renoir MMHUB client-ID -> name table, indexed [client_id][rw] where rw is
 * the RW field of VM_L2_PROTECTION_FAULT_STATUS (see
 * gmc_v9_0_process_interrupt). Missing entries decode as NULL ("unknown").
 */
static const char *mmhub_client_ids_renoir[][2] = {
	[0][0] = "MP1",
	[1][0] = "MP0",
	[2][0] = "HDP",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[13][0] = "UTCL2",
	[19][0] = "TLS",
	[26][0] = "OSS",
	[27][0] = "SDMA0",
	[28][0] = "VCN",
	[29][0] = "VCNU",
	[30][0] = "JPEG",
	[0][1] = "MP1",
	[1][1] = "MP0",
	[2][1] = "HDP",
	[3][1] = "XDP",
	[6][1] = "DBGU0",
	[7][1] = "DCEDMC",
	[8][1] = "DCEVGA",
	[9][1] = "DCEDWB",
	[26][1] = "OSS",
	[27][1] = "SDMA0",
	[28][1] = "VCN",
	[29][1] = "VCNU",
	[30][1] = "JPEG",
};
14202f23f5fSAlex Deucher 
/* Vega10 MMHUB client-ID -> name table, indexed [client_id][rw] where rw is
 * the RW field of VM_L2_PROTECTION_FAULT_STATUS. Indices of the form 32+n
 * are clients in the second 32-ID group. Missing entries decode as NULL.
 */
static const char *mmhub_client_ids_vega10[][2] = {
	[0][0] = "MP0",
	[1][0] = "UVD",
	[2][0] = "UVDU",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "VCE0",
	[32+1][0] = "VCE0U",
	[32+2][0] = "XDMA",
	[32+3][0] = "DCE",
	[32+4][0] = "MP1",
	[32+14][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "UVD",
	[2][1] = "UVDU",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "VCE0",
	[32+1][1] = "VCE0U",
	[32+2][1] = "XDMA",
	[32+3][1] = "DCE",
	[32+4][1] = "DCEDWB",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+14][1] = "SDMA1",
};
17402f23f5fSAlex Deucher 
/* Vega12 MMHUB client-ID -> name table, indexed [client_id][rw] where rw is
 * the RW field of VM_L2_PROTECTION_FAULT_STATUS. Indices of the form 32+n
 * are clients in the second 32-ID group. Missing entries decode as NULL.
 */
static const char *mmhub_client_ids_vega12[][2] = {
	[0][0] = "MP0",
	[1][0] = "VCE0",
	[2][0] = "VCE0U",
	[3][0] = "HDP",
	[13][0] = "UTCL2",
	[14][0] = "OSS",
	[15][0] = "SDMA1",
	[32+0][0] = "DCE",
	[32+1][0] = "XDMA",
	[32+2][0] = "UVD",
	[32+3][0] = "UVDU",
	[32+4][0] = "MP1",
	[32+15][0] = "SDMA0",
	[0][1] = "MP0",
	[1][1] = "VCE0",
	[2][1] = "VCE0U",
	[3][1] = "DBGU0",
	[4][1] = "HDP",
	[5][1] = "XDP",
	[14][1] = "OSS",
	[15][1] = "SDMA0",
	[32+0][1] = "DCE",
	[32+1][1] = "DCEDWB",
	[32+2][1] = "XDMA",
	[32+3][1] = "UVD",
	[32+4][1] = "UVDU",
	[32+5][1] = "MP1",
	[32+6][1] = "DBGU1",
	[32+15][1] = "SDMA1",
};
20602f23f5fSAlex Deucher 
/* Vega20 MMHUB client-ID -> name table, indexed [client_id][rw] where rw is
 * the RW field of VM_L2_PROTECTION_FAULT_STATUS. Indices of the form 32+n
 * are clients in the second 32-ID group. Missing entries decode as NULL.
 */
static const char *mmhub_client_ids_vega20[][2] = {
	[0][0] = "XDMA",
	[1][0] = "DCE",
	[2][0] = "VCE0",
	[3][0] = "VCE0U",
	[4][0] = "UVD",
	[5][0] = "UVD1U",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+0][0] = "UVD",
	[32+1][0] = "UVDU",
	[32+2][0] = "MP1",
	[32+3][0] = "MP0",
	[32+12][0] = "UTCL2",
	[32+14][0] = "SDMA1",
	[0][1] = "XDMA",
	[1][1] = "DCE",
	[2][1] = "DCEDWB",
	[3][1] = "VCE0",
	[4][1] = "VCE0U",
	[5][1] = "UVD1",
	[6][1] = "UVD1U",
	[7][1] = "DBGU0",
	[8][1] = "XDP",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+0][1] = "UVD",
	[32+1][1] = "UVDU",
	[32+2][1] = "DBGU1",
	[32+3][1] = "MP1",
	[32+4][1] = "MP0",
	[32+14][1] = "SDMA1",
};
24202f23f5fSAlex Deucher 
/* Arcturus MMHUB client-ID -> name table, indexed [client_id][rw] where rw
 * is the RW field of VM_L2_PROTECTION_FAULT_STATUS. Indices of the form
 * k*32+n select the n-th client of the k-th 32-ID group (Arcturus has many
 * SDMA/VCN instances spread across groups). Missing entries decode as NULL.
 */
static const char *mmhub_client_ids_arcturus[][2] = {
	[2][0] = "MP1",
	[3][0] = "MP0",
	[10][0] = "UTCL2",
	[13][0] = "OSS",
	[14][0] = "HDP",
	[15][0] = "SDMA0",
	[32+15][0] = "SDMA1",
	[64+15][0] = "SDMA2",
	[96+15][0] = "SDMA3",
	[128+15][0] = "SDMA4",
	[160+11][0] = "JPEG",
	[160+12][0] = "VCN",
	[160+13][0] = "VCNU",
	[160+15][0] = "SDMA5",
	[192+10][0] = "UTCL2",
	[192+11][0] = "JPEG1",
	[192+12][0] = "VCN1",
	[192+13][0] = "VCN1U",
	[192+15][0] = "SDMA6",
	[224+15][0] = "SDMA7",
	[0][1] = "DBGU1",
	[1][1] = "XDP",
	[2][1] = "MP1",
	[3][1] = "MP0",
	[13][1] = "OSS",
	[14][1] = "HDP",
	[15][1] = "SDMA0",
	[32+15][1] = "SDMA1",
	[64+15][1] = "SDMA2",
	[96+15][1] = "SDMA3",
	[128+15][1] = "SDMA4",
	[160+11][1] = "JPEG",
	[160+12][1] = "VCN",
	[160+13][1] = "VCNU",
	[160+15][1] = "SDMA5",
	[192+11][1] = "JPEG1",
	[192+12][1] = "VCN1",
	[192+13][1] = "VCN1U",
	[192+15][1] = "SDMA6",
	[224+15][1] = "SDMA7",
};
285e60f8db5SAlex Xie 
286e60f8db5SAlex Xie static const u32 golden_settings_vega10_hdp[] =
287e60f8db5SAlex Xie {
288e60f8db5SAlex Xie 	0xf64, 0x0fffffff, 0x00000000,
289e60f8db5SAlex Xie 	0xf65, 0x0fffffff, 0x00000000,
290e60f8db5SAlex Xie 	0xf66, 0x0fffffff, 0x00000000,
291e60f8db5SAlex Xie 	0xf67, 0x0fffffff, 0x00000000,
292e60f8db5SAlex Xie 	0xf68, 0x0fffffff, 0x00000000,
293e60f8db5SAlex Xie 	0xf6a, 0x0fffffff, 0x00000000,
294e60f8db5SAlex Xie 	0xf6b, 0x0fffffff, 0x00000000,
295e60f8db5SAlex Xie 	0xf6c, 0x0fffffff, 0x00000000,
296e60f8db5SAlex Xie 	0xf6d, 0x0fffffff, 0x00000000,
297e60f8db5SAlex Xie 	0xf6e, 0x0fffffff, 0x00000000,
298e60f8db5SAlex Xie };
299e60f8db5SAlex Xie 
/* MMHUB 1.0.0 golden register settings; each SOC15_REG_GOLDEN_VALUE entry
 * is (hwip, instance, register, and_mask, or_value).
 */
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
	SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0, 0x00000030, 0x55555565)
};
3055c583018SEvan Quan 
/* ATHUB 1.0.0 golden register settings; each SOC15_REG_GOLDEN_VALUE entry
 * is (hwip, instance, register, and_mask, or_value).
 */
static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
{
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL, 0x0000ff00, 0x00000800),
	SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
3115c583018SEvan Quan 
/* UMC MCUMC control register addresses: 8 UMC instances (base stride
 * 0x40000 starting at 0x000143c0) x 4 channels (stride 0x800) = 32 regs.
 */
#define ECC_UMC_CTRL_CHANNELS(base)	\
	((base) + 0x00000000),		\
	((base) + 0x00000800),		\
	((base) + 0x00001000),		\
	((base) + 0x00001800)

static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
	ECC_UMC_CTRL_CHANNELS(0x000143c0),
	ECC_UMC_CTRL_CHANNELS(0x000543c0),
	ECC_UMC_CTRL_CHANNELS(0x000943c0),
	ECC_UMC_CTRL_CHANNELS(0x000d43c0),
	ECC_UMC_CTRL_CHANNELS(0x001143c0),
	ECC_UMC_CTRL_CHANNELS(0x001543c0),
	ECC_UMC_CTRL_CHANNELS(0x001943c0),
	ECC_UMC_CTRL_CHANNELS(0x001d43c0),
};

#undef ECC_UMC_CTRL_CHANNELS
34602bab923SDavid Panariti 
/* UMC MCUMC control-mask register addresses: same 8-instance x 4-channel
 * layout as ecc_umc_mcumc_ctrl_addrs, offset 0x20 from the control regs.
 */
#define ECC_UMC_CTRL_MASK_CHANNELS(base)	\
	((base) + 0x00000000),			\
	((base) + 0x00000800),			\
	((base) + 0x00001000),			\
	((base) + 0x00001800)

static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
	ECC_UMC_CTRL_MASK_CHANNELS(0x000143e0),
	ECC_UMC_CTRL_MASK_CHANNELS(0x000543e0),
	ECC_UMC_CTRL_MASK_CHANNELS(0x000943e0),
	ECC_UMC_CTRL_MASK_CHANNELS(0x000d43e0),
	ECC_UMC_CTRL_MASK_CHANNELS(0x001143e0),
	ECC_UMC_CTRL_MASK_CHANNELS(0x001543e0),
	ECC_UMC_CTRL_MASK_CHANNELS(0x001943e0),
	ECC_UMC_CTRL_MASK_CHANNELS(0x001d43e0),
};

#undef ECC_UMC_CTRL_MASK_CHANNELS
38102bab923SDavid Panariti 
/* UMC MCUMC status register addresses: same 8-instance x 4-channel layout
 * as ecc_umc_mcumc_ctrl_addrs, offset 0x2 from the control regs.
 */
#define ECC_UMC_STATUS_CHANNELS(base)	\
	((base) + 0x00000000),		\
	((base) + 0x00000800),		\
	((base) + 0x00001000),		\
	((base) + 0x00001800)

static const uint32_t ecc_umc_mcumc_status_addrs[] = {
	ECC_UMC_STATUS_CHANNELS(0x000143c2),
	ECC_UMC_STATUS_CHANNELS(0x000543c2),
	ECC_UMC_STATUS_CHANNELS(0x000943c2),
	ECC_UMC_STATUS_CHANNELS(0x000d43c2),
	ECC_UMC_STATUS_CHANNELS(0x001143c2),
	ECC_UMC_STATUS_CHANNELS(0x001543c2),
	ECC_UMC_STATUS_CHANNELS(0x001943c2),
	ECC_UMC_STATUS_CHANNELS(0x001d43c2),
};

#undef ECC_UMC_STATUS_CHANNELS
41602bab923SDavid Panariti 
417791c4769Sxinhui pan static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
418791c4769Sxinhui pan 		struct amdgpu_irq_src *src,
419791c4769Sxinhui pan 		unsigned type,
420791c4769Sxinhui pan 		enum amdgpu_interrupt_state state)
421791c4769Sxinhui pan {
422791c4769Sxinhui pan 	u32 bits, i, tmp, reg;
423791c4769Sxinhui pan 
4241e2c6d55SJohn Clements 	/* Devices newer then VEGA10/12 shall have these programming
4251e2c6d55SJohn Clements 	     sequences performed by PSP BL */
4261e2c6d55SJohn Clements 	if (adev->asic_type >= CHIP_VEGA20)
4271e2c6d55SJohn Clements 		return 0;
4281e2c6d55SJohn Clements 
429791c4769Sxinhui pan 	bits = 0x7f;
430791c4769Sxinhui pan 
431791c4769Sxinhui pan 	switch (state) {
432791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_DISABLE:
433791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
434791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
435791c4769Sxinhui pan 			tmp = RREG32(reg);
436791c4769Sxinhui pan 			tmp &= ~bits;
437791c4769Sxinhui pan 			WREG32(reg, tmp);
438791c4769Sxinhui pan 		}
439791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
440791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
441791c4769Sxinhui pan 			tmp = RREG32(reg);
442791c4769Sxinhui pan 			tmp &= ~bits;
443791c4769Sxinhui pan 			WREG32(reg, tmp);
444791c4769Sxinhui pan 		}
445791c4769Sxinhui pan 		break;
446791c4769Sxinhui pan 	case AMDGPU_IRQ_STATE_ENABLE:
447791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
448791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_addrs[i];
449791c4769Sxinhui pan 			tmp = RREG32(reg);
450791c4769Sxinhui pan 			tmp |= bits;
451791c4769Sxinhui pan 			WREG32(reg, tmp);
452791c4769Sxinhui pan 		}
453791c4769Sxinhui pan 		for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
454791c4769Sxinhui pan 			reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
455791c4769Sxinhui pan 			tmp = RREG32(reg);
456791c4769Sxinhui pan 			tmp |= bits;
457791c4769Sxinhui pan 			WREG32(reg, tmp);
458791c4769Sxinhui pan 		}
459791c4769Sxinhui pan 		break;
460791c4769Sxinhui pan 	default:
461791c4769Sxinhui pan 		break;
462791c4769Sxinhui pan 	}
463791c4769Sxinhui pan 
464791c4769Sxinhui pan 	return 0;
465791c4769Sxinhui pan }
466791c4769Sxinhui pan 
467e60f8db5SAlex Xie static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
468e60f8db5SAlex Xie 					struct amdgpu_irq_src *src,
469e60f8db5SAlex Xie 					unsigned type,
470e60f8db5SAlex Xie 					enum amdgpu_interrupt_state state)
471e60f8db5SAlex Xie {
472e60f8db5SAlex Xie 	struct amdgpu_vmhub *hub;
473ae6d1416STom St Denis 	u32 tmp, reg, bits, i, j;
474e60f8db5SAlex Xie 
47511250164SChristian König 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47611250164SChristian König 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47711250164SChristian König 		VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47811250164SChristian König 		VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
47911250164SChristian König 		VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
48011250164SChristian König 		VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
48111250164SChristian König 		VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
48211250164SChristian König 
483e60f8db5SAlex Xie 	switch (state) {
484e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_DISABLE:
4851daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
486ae6d1416STom St Denis 			hub = &adev->vmhub[j];
487e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
488e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
489e60f8db5SAlex Xie 				tmp = RREG32(reg);
490e60f8db5SAlex Xie 				tmp &= ~bits;
491e60f8db5SAlex Xie 				WREG32(reg, tmp);
492e60f8db5SAlex Xie 			}
493e60f8db5SAlex Xie 		}
494e60f8db5SAlex Xie 		break;
495e60f8db5SAlex Xie 	case AMDGPU_IRQ_STATE_ENABLE:
4961daa2bfaSLe Ma 		for (j = 0; j < adev->num_vmhubs; j++) {
497ae6d1416STom St Denis 			hub = &adev->vmhub[j];
498e60f8db5SAlex Xie 			for (i = 0; i < 16; i++) {
499e60f8db5SAlex Xie 				reg = hub->vm_context0_cntl + i;
500e60f8db5SAlex Xie 				tmp = RREG32(reg);
501e60f8db5SAlex Xie 				tmp |= bits;
502e60f8db5SAlex Xie 				WREG32(reg, tmp);
503e60f8db5SAlex Xie 			}
504e60f8db5SAlex Xie 		}
505e60f8db5SAlex Xie 	default:
506e60f8db5SAlex Xie 		break;
507e60f8db5SAlex Xie 	}
508e60f8db5SAlex Xie 
509e60f8db5SAlex Xie 	return 0;
510e60f8db5SAlex Xie }
511e60f8db5SAlex Xie 
/*
 * gmc_v9_0_process_interrupt - handle a VM protection-fault interrupt
 *
 * Decodes the faulting address and source hub from the IV ring entry,
 * filters/retries recoverable faults, and rate-limited-logs the decoded
 * VM_L2_PROTECTION_FAULT_STATUS fields (including the client name looked
 * up from the per-ASIC tables above).
 *
 * Returns 1 when the fault was filtered or handled (which also prevents
 * forwarding it to KFD), 0 otherwise.
 */
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub;
	/* bit 7 of src_data[1] marks a retry-able fault */
	bool retry_fault = !!(entry->src_data[1] & 0x80);
	uint32_t status = 0, cid = 0, rw = 0;
	u64 addr;
	char hub_name[10];
	const char *mmhub_cid;

	/* reassemble the faulting page address: low bits from src_data[0]
	 * (page-shifted by 12), high nibble from src_data[1]
	 */
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	/* drop retry faults already seen for this address/pasid */
	if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
						    entry->timestamp))
		return 1; /* This also prevents sending it to KFD */

	/* map the IH client id to the hub that raised the fault */
	if (entry->client_id == SOC15_IH_CLIENTID_VMC) {
		snprintf(hub_name, sizeof(hub_name), "mmhub0");
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
	} else if (entry->client_id == SOC15_IH_CLIENTID_VMC1) {
		snprintf(hub_name, sizeof(hub_name), "mmhub1");
		hub = &adev->vmhub[AMDGPU_MMHUB_1];
	} else {
		snprintf(hub_name, sizeof(hub_name), "gfxhub0");
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
	}

	/* If it's the first fault for this address, process it normally */
	if (retry_fault && !in_interrupt() &&
	    amdgpu_vm_handle_fault(adev, entry->pasid, addr))
		return 1; /* This also prevents sending it to KFD */

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		cid = REG_GET_FIELD(status,
				    VM_L2_PROTECTION_FAULT_STATUS, CID);
		rw = REG_GET_FIELD(status,
				   VM_L2_PROTECTION_FAULT_STATUS, RW);
		/* write bit 0 of the fault cntl to clear the latched status */
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	/* rate-limited diagnostic dump of the fault */
	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);

		dev_err(adev->dev,
			"[%s] %s page fault (src_id:%u ring:%u vmid:%u "
			"pasid:%u, for process %s pid %d thread %s pid %d)\n",
			hub_name, retry_fault ? "retry" : "no-retry",
			entry->src_id, entry->ring_id, entry->vmid,
			entry->pasid, task_info.process_name, task_info.tgid,
			task_info.task_name, task_info.pid);
		dev_err(adev->dev, "  in page starting at address 0x%016llx from client %d\n",
			addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			if (hub == &adev->vmhub[AMDGPU_GFXHUB_0]) {
				dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
					cid >= ARRAY_SIZE(gfxhub_client_ids) ? "unknown" : gfxhub_client_ids[cid],
					cid);
			} else {
				/* MMHUB client tables are per-ASIC */
				switch (adev->asic_type) {
				case CHIP_VEGA10:
					mmhub_cid = mmhub_client_ids_vega10[cid][rw];
					break;
				case CHIP_VEGA12:
					mmhub_cid = mmhub_client_ids_vega12[cid][rw];
					break;
				case CHIP_VEGA20:
					mmhub_cid = mmhub_client_ids_vega20[cid][rw];
					break;
				case CHIP_ARCTURUS:
					mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
					break;
				case CHIP_RAVEN:
					mmhub_cid = mmhub_client_ids_raven[cid][rw];
					break;
				case CHIP_RENOIR:
					mmhub_cid = mmhub_client_ids_renoir[cid][rw];
					break;
				default:
					mmhub_cid = NULL;
					break;
				}
				dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
					mmhub_cid ? mmhub_cid : "unknown", cid);
			}
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				VM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%x\n", rw);
		}
	}

	return 0;
}
631e60f8db5SAlex Xie 
632e60f8db5SAlex Xie static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
633e60f8db5SAlex Xie 	.set = gmc_v9_0_vm_fault_interrupt_state,
634e60f8db5SAlex Xie 	.process = gmc_v9_0_process_interrupt,
635e60f8db5SAlex Xie };
636e60f8db5SAlex Xie 
637791c4769Sxinhui pan 
638791c4769Sxinhui pan static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
639791c4769Sxinhui pan 	.set = gmc_v9_0_ecc_interrupt_state,
64034cc4fd9STao Zhou 	.process = amdgpu_umc_process_ecc_irq,
641791c4769Sxinhui pan };
642791c4769Sxinhui pan 
643e60f8db5SAlex Xie static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
644e60f8db5SAlex Xie {
645770d13b1SChristian König 	adev->gmc.vm_fault.num_types = 1;
646770d13b1SChristian König 	adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
647791c4769Sxinhui pan 
6482ee9403eSZhigang Luo 	if (!amdgpu_sriov_vf(adev)) {
649791c4769Sxinhui pan 		adev->gmc.ecc_irq.num_types = 1;
650791c4769Sxinhui pan 		adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
651e60f8db5SAlex Xie 	}
6522ee9403eSZhigang Luo }
653e60f8db5SAlex Xie 
6542a79d868SYong Zhao static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
6552a79d868SYong Zhao 					uint32_t flush_type)
65603f89febSChristian König {
65703f89febSChristian König 	u32 req = 0;
65803f89febSChristian König 
65903f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
660c4f46f22SChristian König 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
6612a79d868SYong Zhao 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
66203f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
66303f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
66403f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
66503f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
66603f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
66703f89febSChristian König 	req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
66803f89febSChristian König 			    CLEAR_PROTECTION_FAULT_STATUS_ADDR,	0);
66903f89febSChristian König 
67003f89febSChristian König 	return req;
67103f89febSChristian König }
67203f89febSChristian König 
67390f6452cSchangzhu /**
67490f6452cSchangzhu  * gmc_v9_0_use_invalidate_semaphore - judge whether to use semaphore
67590f6452cSchangzhu  *
67690f6452cSchangzhu  * @adev: amdgpu_device pointer
67790f6452cSchangzhu  * @vmhub: vmhub type
67890f6452cSchangzhu  *
67990f6452cSchangzhu  */
68090f6452cSchangzhu static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
68190f6452cSchangzhu 				       uint32_t vmhub)
68290f6452cSchangzhu {
68390f6452cSchangzhu 	return ((vmhub == AMDGPU_MMHUB_0 ||
68490f6452cSchangzhu 		 vmhub == AMDGPU_MMHUB_1) &&
68590f6452cSchangzhu 		(!amdgpu_sriov_vf(adev)) &&
68654f78a76SAlex Deucher 		(!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
68754f78a76SAlex Deucher 		   (adev->apu_flags & AMD_APU_IS_PICASSO))));
68890f6452cSchangzhu }
68990f6452cSchangzhu 
690ea930000SAlex Sierra static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
691ea930000SAlex Sierra 					uint8_t vmid, uint16_t *p_pasid)
692ea930000SAlex Sierra {
693ea930000SAlex Sierra 	uint32_t value;
694ea930000SAlex Sierra 
695ea930000SAlex Sierra 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
696ea930000SAlex Sierra 		     + vmid);
697ea930000SAlex Sierra 	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
698ea930000SAlex Sierra 
699ea930000SAlex Sierra 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
700ea930000SAlex Sierra }
701ea930000SAlex Sierra 
702e60f8db5SAlex Xie /*
703e60f8db5SAlex Xie  * GART
704e60f8db5SAlex Xie  * VMID 0 is the physical GPU addresses as used by the kernel.
705e60f8db5SAlex Xie  * VMIDs 1-15 are used for userspace clients and are handled
706e60f8db5SAlex Xie  * by the amdgpu vm/hsa code.
707e60f8db5SAlex Xie  */
708e60f8db5SAlex Xie 
709e60f8db5SAlex Xie /**
7102a79d868SYong Zhao  * gmc_v9_0_flush_gpu_tlb - tlb flush with certain type
711e60f8db5SAlex Xie  *
712e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
713e60f8db5SAlex Xie  * @vmid: vm instance to flush
7142a79d868SYong Zhao  * @flush_type: the flush type
715e60f8db5SAlex Xie  *
7162a79d868SYong Zhao  * Flush the TLB for the requested page table using certain type.
717e60f8db5SAlex Xie  */
7183ff98548SOak Zeng static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
7193ff98548SOak Zeng 					uint32_t vmhub, uint32_t flush_type)
720e60f8db5SAlex Xie {
72190f6452cSchangzhu 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
722e60f8db5SAlex Xie 	const unsigned eng = 17;
723b80cd524SFelix Kuehling 	u32 j, inv_req, inv_req2, tmp;
7243ff98548SOak Zeng 	struct amdgpu_vmhub *hub;
725e60f8db5SAlex Xie 
7263ff98548SOak Zeng 	BUG_ON(vmhub >= adev->num_vmhubs);
7273ff98548SOak Zeng 
7283ff98548SOak Zeng 	hub = &adev->vmhub[vmhub];
729b80cd524SFelix Kuehling 	if (adev->gmc.xgmi.num_physical_nodes &&
730b80cd524SFelix Kuehling 	    adev->asic_type == CHIP_VEGA20) {
731b80cd524SFelix Kuehling 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
732b80cd524SFelix Kuehling 		 * heavy-weight TLB flush (type 2), which flushes
733b80cd524SFelix Kuehling 		 * both. Due to a race condition with concurrent
734b80cd524SFelix Kuehling 		 * memory accesses using the same TLB cache line, we
735b80cd524SFelix Kuehling 		 * still need a second TLB flush after this.
736b80cd524SFelix Kuehling 		 */
737b80cd524SFelix Kuehling 		inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
738b80cd524SFelix Kuehling 		inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
739b80cd524SFelix Kuehling 	} else {
74037c58ddfSFelix Kuehling 		inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
741b80cd524SFelix Kuehling 		inv_req2 = 0;
742b80cd524SFelix Kuehling 	}
743e60f8db5SAlex Xie 
74482d1a1b1SChengming Gui 	/* This is necessary for a HW workaround under SRIOV as well
74582d1a1b1SChengming Gui 	 * as GFXOFF under bare metal
74682d1a1b1SChengming Gui 	 */
74782d1a1b1SChengming Gui 	if (adev->gfx.kiq.ring.sched.ready &&
74882d1a1b1SChengming Gui 	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
74981202807SDennis Li 	    down_read_trylock(&adev->reset_sem)) {
750148f597dSHuang Rui 		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
751148f597dSHuang Rui 		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
752af5fe1e9SChristian König 
75337c58ddfSFelix Kuehling 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
754af5fe1e9SChristian König 						   1 << vmid);
75581202807SDennis Li 		up_read(&adev->reset_sem);
7563ff98548SOak Zeng 		return;
757fc0faf04SEmily Deng 	}
7583890d111SEmily Deng 
7593890d111SEmily Deng 	spin_lock(&adev->gmc.invalidate_lock);
760f920d1bbSchangzhu 
761f920d1bbSchangzhu 	/*
762f920d1bbSchangzhu 	 * It may lose gpuvm invalidate acknowldege state across power-gating
763f920d1bbSchangzhu 	 * off cycle, add semaphore acquire before invalidation and semaphore
764f920d1bbSchangzhu 	 * release after invalidation to avoid entering power gated state
765f920d1bbSchangzhu 	 * to WA the Issue
766f920d1bbSchangzhu 	 */
767f920d1bbSchangzhu 
768f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
76990f6452cSchangzhu 	if (use_semaphore) {
770f920d1bbSchangzhu 		for (j = 0; j < adev->usec_timeout; j++) {
771f920d1bbSchangzhu 			/* a read return value of 1 means semaphore acuqire */
772148f597dSHuang Rui 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
773148f597dSHuang Rui 					    hub->eng_distance * eng);
774f920d1bbSchangzhu 			if (tmp & 0x1)
775f920d1bbSchangzhu 				break;
776f920d1bbSchangzhu 			udelay(1);
777f920d1bbSchangzhu 		}
778f920d1bbSchangzhu 
779f920d1bbSchangzhu 		if (j >= adev->usec_timeout)
780f920d1bbSchangzhu 			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
781f920d1bbSchangzhu 	}
782f920d1bbSchangzhu 
783b80cd524SFelix Kuehling 	do {
784148f597dSHuang Rui 		WREG32_NO_KIQ(hub->vm_inv_eng0_req +
785148f597dSHuang Rui 			      hub->eng_distance * eng, inv_req);
78653499173SXiaojie Yuan 
78753499173SXiaojie Yuan 		/*
788b80cd524SFelix Kuehling 		 * Issue a dummy read to wait for the ACK register to
789b80cd524SFelix Kuehling 		 * be cleared to avoid a false ACK due to the new fast
790b80cd524SFelix Kuehling 		 * GRBM interface.
79153499173SXiaojie Yuan 		 */
79253499173SXiaojie Yuan 		if (vmhub == AMDGPU_GFXHUB_0)
793148f597dSHuang Rui 			RREG32_NO_KIQ(hub->vm_inv_eng0_req +
794148f597dSHuang Rui 				      hub->eng_distance * eng);
79553499173SXiaojie Yuan 
796e60f8db5SAlex Xie 		for (j = 0; j < adev->usec_timeout; j++) {
797148f597dSHuang Rui 			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
798148f597dSHuang Rui 					    hub->eng_distance * eng);
799396557b0SChristian König 			if (tmp & (1 << vmid))
800e60f8db5SAlex Xie 				break;
801e60f8db5SAlex Xie 			udelay(1);
802e60f8db5SAlex Xie 		}
803f920d1bbSchangzhu 
804b80cd524SFelix Kuehling 		inv_req = inv_req2;
805b80cd524SFelix Kuehling 		inv_req2 = 0;
806b80cd524SFelix Kuehling 	} while (inv_req);
807b80cd524SFelix Kuehling 
808f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
80990f6452cSchangzhu 	if (use_semaphore)
810f920d1bbSchangzhu 		/*
811f920d1bbSchangzhu 		 * add semaphore release after invalidation,
812f920d1bbSchangzhu 		 * write with 0 means semaphore release
813f920d1bbSchangzhu 		 */
814148f597dSHuang Rui 		WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
815148f597dSHuang Rui 			      hub->eng_distance * eng, 0);
816f920d1bbSchangzhu 
8173890d111SEmily Deng 	spin_unlock(&adev->gmc.invalidate_lock);
818f920d1bbSchangzhu 
819396557b0SChristian König 	if (j < adev->usec_timeout)
8203ff98548SOak Zeng 		return;
821396557b0SChristian König 
822e60f8db5SAlex Xie 	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
823e60f8db5SAlex Xie }
824e60f8db5SAlex Xie 
825ea930000SAlex Sierra /**
826ea930000SAlex Sierra  * gmc_v9_0_flush_gpu_tlb_pasid - tlb flush via pasid
827ea930000SAlex Sierra  *
828ea930000SAlex Sierra  * @adev: amdgpu_device pointer
829ea930000SAlex Sierra  * @pasid: pasid to be flush
830ea930000SAlex Sierra  *
831ea930000SAlex Sierra  * Flush the TLB for the requested pasid.
832ea930000SAlex Sierra  */
833ea930000SAlex Sierra static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
834ea930000SAlex Sierra 					uint16_t pasid, uint32_t flush_type,
835ea930000SAlex Sierra 					bool all_hub)
836ea930000SAlex Sierra {
837ea930000SAlex Sierra 	int vmid, i;
838ea930000SAlex Sierra 	signed long r;
839ea930000SAlex Sierra 	uint32_t seq;
840ea930000SAlex Sierra 	uint16_t queried_pasid;
841ea930000SAlex Sierra 	bool ret;
842ea930000SAlex Sierra 	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
843ea930000SAlex Sierra 	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
844ea930000SAlex Sierra 
84553b3f8f4SDennis Li 	if (amdgpu_in_reset(adev))
846ea930000SAlex Sierra 		return -EIO;
847ea930000SAlex Sierra 
84881202807SDennis Li 	if (ring->sched.ready && down_read_trylock(&adev->reset_sem)) {
849b80cd524SFelix Kuehling 		/* Vega20+XGMI caches PTEs in TC and TLB. Add a
850b80cd524SFelix Kuehling 		 * heavy-weight TLB flush (type 2), which flushes
851b80cd524SFelix Kuehling 		 * both. Due to a race condition with concurrent
852b80cd524SFelix Kuehling 		 * memory accesses using the same TLB cache line, we
853b80cd524SFelix Kuehling 		 * still need a second TLB flush after this.
854b80cd524SFelix Kuehling 		 */
855b80cd524SFelix Kuehling 		bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
856b80cd524SFelix Kuehling 				       adev->asic_type == CHIP_VEGA20);
857b80cd524SFelix Kuehling 		/* 2 dwords flush + 8 dwords fence */
858b80cd524SFelix Kuehling 		unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
859b80cd524SFelix Kuehling 
860b80cd524SFelix Kuehling 		if (vega20_xgmi_wa)
861b80cd524SFelix Kuehling 			ndw += kiq->pmf->invalidate_tlbs_size;
862b80cd524SFelix Kuehling 
863ea930000SAlex Sierra 		spin_lock(&adev->gfx.kiq.ring_lock);
86436a1707aSAlex Sierra 		/* 2 dwords flush + 8 dwords fence */
865b80cd524SFelix Kuehling 		amdgpu_ring_alloc(ring, ndw);
866b80cd524SFelix Kuehling 		if (vega20_xgmi_wa)
867b80cd524SFelix Kuehling 			kiq->pmf->kiq_invalidate_tlbs(ring,
868b80cd524SFelix Kuehling 						      pasid, 2, all_hub);
869ea930000SAlex Sierra 		kiq->pmf->kiq_invalidate_tlbs(ring,
870ea930000SAlex Sierra 					pasid, flush_type, all_hub);
87104e4e2e9SYintian Tao 		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
87204e4e2e9SYintian Tao 		if (r) {
87304e4e2e9SYintian Tao 			amdgpu_ring_undo(ring);
874abb17b1eSColin Ian King 			spin_unlock(&adev->gfx.kiq.ring_lock);
87581202807SDennis Li 			up_read(&adev->reset_sem);
87604e4e2e9SYintian Tao 			return -ETIME;
87704e4e2e9SYintian Tao 		}
87804e4e2e9SYintian Tao 
879ea930000SAlex Sierra 		amdgpu_ring_commit(ring);
880ea930000SAlex Sierra 		spin_unlock(&adev->gfx.kiq.ring_lock);
881ea930000SAlex Sierra 		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
882ea930000SAlex Sierra 		if (r < 1) {
883aac89168SDennis Li 			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
88481202807SDennis Li 			up_read(&adev->reset_sem);
885ea930000SAlex Sierra 			return -ETIME;
886ea930000SAlex Sierra 		}
88781202807SDennis Li 		up_read(&adev->reset_sem);
888ea930000SAlex Sierra 		return 0;
889ea930000SAlex Sierra 	}
890ea930000SAlex Sierra 
891ea930000SAlex Sierra 	for (vmid = 1; vmid < 16; vmid++) {
892ea930000SAlex Sierra 
893ea930000SAlex Sierra 		ret = gmc_v9_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
894ea930000SAlex Sierra 				&queried_pasid);
895ea930000SAlex Sierra 		if (ret && queried_pasid == pasid) {
896ea930000SAlex Sierra 			if (all_hub) {
897ea930000SAlex Sierra 				for (i = 0; i < adev->num_vmhubs; i++)
898ea930000SAlex Sierra 					gmc_v9_0_flush_gpu_tlb(adev, vmid,
899fa34edbeSFelix Kuehling 							i, flush_type);
900ea930000SAlex Sierra 			} else {
901ea930000SAlex Sierra 				gmc_v9_0_flush_gpu_tlb(adev, vmid,
902fa34edbeSFelix Kuehling 						AMDGPU_GFXHUB_0, flush_type);
903ea930000SAlex Sierra 			}
904ea930000SAlex Sierra 			break;
905ea930000SAlex Sierra 		}
906ea930000SAlex Sierra 	}
907ea930000SAlex Sierra 
908ea930000SAlex Sierra 	return 0;
909ea930000SAlex Sierra 
910ea930000SAlex Sierra }
911ea930000SAlex Sierra 
9129096d6e5SChristian König static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
913c633c00bSChristian König 					    unsigned vmid, uint64_t pd_addr)
9149096d6e5SChristian König {
91590f6452cSchangzhu 	bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
916250b4228SChristian König 	struct amdgpu_device *adev = ring->adev;
917250b4228SChristian König 	struct amdgpu_vmhub *hub = &adev->vmhub[ring->funcs->vmhub];
9182a79d868SYong Zhao 	uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
9199096d6e5SChristian König 	unsigned eng = ring->vm_inv_eng;
9209096d6e5SChristian König 
921f920d1bbSchangzhu 	/*
922f920d1bbSchangzhu 	 * It may lose gpuvm invalidate acknowldege state across power-gating
923f920d1bbSchangzhu 	 * off cycle, add semaphore acquire before invalidation and semaphore
924f920d1bbSchangzhu 	 * release after invalidation to avoid entering power gated state
925f920d1bbSchangzhu 	 * to WA the Issue
926f920d1bbSchangzhu 	 */
927f920d1bbSchangzhu 
928f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
92990f6452cSchangzhu 	if (use_semaphore)
930f920d1bbSchangzhu 		/* a read return value of 1 means semaphore acuqire */
931f920d1bbSchangzhu 		amdgpu_ring_emit_reg_wait(ring,
932148f597dSHuang Rui 					  hub->vm_inv_eng0_sem +
933148f597dSHuang Rui 					  hub->eng_distance * eng, 0x1, 0x1);
934f920d1bbSchangzhu 
935148f597dSHuang Rui 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 +
936148f597dSHuang Rui 			      (hub->ctx_addr_distance * vmid),
9379096d6e5SChristian König 			      lower_32_bits(pd_addr));
9389096d6e5SChristian König 
939148f597dSHuang Rui 	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 +
940148f597dSHuang Rui 			      (hub->ctx_addr_distance * vmid),
9419096d6e5SChristian König 			      upper_32_bits(pd_addr));
9429096d6e5SChristian König 
943148f597dSHuang Rui 	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req +
944148f597dSHuang Rui 					    hub->eng_distance * eng,
945148f597dSHuang Rui 					    hub->vm_inv_eng0_ack +
946148f597dSHuang Rui 					    hub->eng_distance * eng,
947f8bc9037SAlex Deucher 					    req, 1 << vmid);
948f732b6b3SChristian König 
949f920d1bbSchangzhu 	/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
95090f6452cSchangzhu 	if (use_semaphore)
951f920d1bbSchangzhu 		/*
952f920d1bbSchangzhu 		 * add semaphore release after invalidation,
953f920d1bbSchangzhu 		 * write with 0 means semaphore release
954f920d1bbSchangzhu 		 */
955148f597dSHuang Rui 		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem +
956148f597dSHuang Rui 				      hub->eng_distance * eng, 0);
957f920d1bbSchangzhu 
9589096d6e5SChristian König 	return pd_addr;
9599096d6e5SChristian König }
9609096d6e5SChristian König 
961c633c00bSChristian König static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
962c633c00bSChristian König 					unsigned pasid)
963c633c00bSChristian König {
964c633c00bSChristian König 	struct amdgpu_device *adev = ring->adev;
965c633c00bSChristian König 	uint32_t reg;
966c633c00bSChristian König 
967f2d66571SLe Ma 	/* Do nothing because there's no lut register for mmhub1. */
968f2d66571SLe Ma 	if (ring->funcs->vmhub == AMDGPU_MMHUB_1)
969f2d66571SLe Ma 		return;
970f2d66571SLe Ma 
971a2d15ed7SLe Ma 	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
972c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
973c633c00bSChristian König 	else
974c633c00bSChristian König 		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;
975c633c00bSChristian König 
976c633c00bSChristian König 	amdgpu_ring_emit_wreg(ring, reg, pasid);
977c633c00bSChristian König }
978c633c00bSChristian König 
979e60f8db5SAlex Xie /*
980e60f8db5SAlex Xie  * PTE format on VEGA 10:
981e60f8db5SAlex Xie  * 63:59 reserved
982e60f8db5SAlex Xie  * 58:57 mtype
983e60f8db5SAlex Xie  * 56 F
984e60f8db5SAlex Xie  * 55 L
985e60f8db5SAlex Xie  * 54 P
986e60f8db5SAlex Xie  * 53 SW
987e60f8db5SAlex Xie  * 52 T
988e60f8db5SAlex Xie  * 50:48 reserved
989e60f8db5SAlex Xie  * 47:12 4k physical page base address
990e60f8db5SAlex Xie  * 11:7 fragment
991e60f8db5SAlex Xie  * 6 write
992e60f8db5SAlex Xie  * 5 read
993e60f8db5SAlex Xie  * 4 exe
994e60f8db5SAlex Xie  * 3 Z
995e60f8db5SAlex Xie  * 2 snooped
996e60f8db5SAlex Xie  * 1 system
997e60f8db5SAlex Xie  * 0 valid
998e60f8db5SAlex Xie  *
999e60f8db5SAlex Xie  * PDE format on VEGA 10:
1000e60f8db5SAlex Xie  * 63:59 block fragment size
1001e60f8db5SAlex Xie  * 58:55 reserved
1002e60f8db5SAlex Xie  * 54 P
1003e60f8db5SAlex Xie  * 53:48 reserved
1004e60f8db5SAlex Xie  * 47:6 physical base address of PD or PTE
1005e60f8db5SAlex Xie  * 5:3 reserved
1006e60f8db5SAlex Xie  * 2 C
1007e60f8db5SAlex Xie  * 1 system
1008e60f8db5SAlex Xie  * 0 valid
1009e60f8db5SAlex Xie  */
1010e60f8db5SAlex Xie 
101171776b6dSChristian König static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
1012e60f8db5SAlex Xie 
1013e60f8db5SAlex Xie {
101471776b6dSChristian König 	switch (flags) {
1015e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_DEFAULT:
101671776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1017e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_NC:
101871776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1019e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_WC:
102071776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC);
1021093e48c0SOak Zeng 	case AMDGPU_VM_MTYPE_RW:
102271776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW);
1023e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_CC:
102471776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
1025e60f8db5SAlex Xie 	case AMDGPU_VM_MTYPE_UC:
102671776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC);
1027e60f8db5SAlex Xie 	default:
102871776b6dSChristian König 		return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC);
1029e60f8db5SAlex Xie 	}
1030e60f8db5SAlex Xie }
1031e60f8db5SAlex Xie 
10323de676d8SChristian König static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level,
10333de676d8SChristian König 				uint64_t *addr, uint64_t *flags)
1034f75e237cSChristian König {
1035bbc9fb10SChristian König 	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
10363de676d8SChristian König 		*addr = adev->vm_manager.vram_base_offset + *addr -
1037770d13b1SChristian König 			adev->gmc.vram_start;
10383de676d8SChristian König 	BUG_ON(*addr & 0xFFFF00000000003FULL);
10396a42fd6fSChristian König 
1040770d13b1SChristian König 	if (!adev->gmc.translate_further)
10416a42fd6fSChristian König 		return;
10426a42fd6fSChristian König 
10436a42fd6fSChristian König 	if (level == AMDGPU_VM_PDB1) {
10446a42fd6fSChristian König 		/* Set the block fragment size */
10456a42fd6fSChristian König 		if (!(*flags & AMDGPU_PDE_PTE))
10466a42fd6fSChristian König 			*flags |= AMDGPU_PDE_BFS(0x9);
10476a42fd6fSChristian König 
10486a42fd6fSChristian König 	} else if (level == AMDGPU_VM_PDB0) {
10496a42fd6fSChristian König 		if (*flags & AMDGPU_PDE_PTE)
10506a42fd6fSChristian König 			*flags &= ~AMDGPU_PDE_PTE;
10516a42fd6fSChristian König 		else
10526a42fd6fSChristian König 			*flags |= AMDGPU_PTE_TF;
10536a42fd6fSChristian König 	}
1054f75e237cSChristian König }
1055f75e237cSChristian König 
1056cbfae36cSChristian König static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
1057cbfae36cSChristian König 				struct amdgpu_bo_va_mapping *mapping,
1058cbfae36cSChristian König 				uint64_t *flags)
1059cbfae36cSChristian König {
1060cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_EXECUTABLE;
1061cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
1062cbfae36cSChristian König 
1063cbfae36cSChristian König 	*flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
1064cbfae36cSChristian König 	*flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK;
1065cbfae36cSChristian König 
1066cbfae36cSChristian König 	if (mapping->flags & AMDGPU_PTE_PRT) {
1067cbfae36cSChristian König 		*flags |= AMDGPU_PTE_PRT;
1068cbfae36cSChristian König 		*flags &= ~AMDGPU_PTE_VALID;
1069cbfae36cSChristian König 	}
1070cbfae36cSChristian König 
1071cbfae36cSChristian König 	if (adev->asic_type == CHIP_ARCTURUS &&
1072cbfae36cSChristian König 	    !(*flags & AMDGPU_PTE_SYSTEM) &&
1073cbfae36cSChristian König 	    mapping->bo_va->is_xgmi)
1074cbfae36cSChristian König 		*flags |= AMDGPU_PTE_SNOOPED;
1075cbfae36cSChristian König }
1076cbfae36cSChristian König 
10777b885f0eSAlex Deucher static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
10787b885f0eSAlex Deucher {
10797b885f0eSAlex Deucher 	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
10807b885f0eSAlex Deucher 	unsigned size;
10817b885f0eSAlex Deucher 
10827b885f0eSAlex Deucher 	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
10837b885f0eSAlex Deucher 		size = AMDGPU_VBIOS_VGA_ALLOCATION;
10847b885f0eSAlex Deucher 	} else {
10857b885f0eSAlex Deucher 		u32 viewport;
10867b885f0eSAlex Deucher 
10877b885f0eSAlex Deucher 		switch (adev->asic_type) {
10887b885f0eSAlex Deucher 		case CHIP_RAVEN:
10897b885f0eSAlex Deucher 		case CHIP_RENOIR:
10907b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
10917b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport,
10927b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
10937b885f0eSAlex Deucher 				REG_GET_FIELD(viewport,
10947b885f0eSAlex Deucher 					      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
10957b885f0eSAlex Deucher 				4);
10967b885f0eSAlex Deucher 			break;
10977b885f0eSAlex Deucher 		case CHIP_VEGA10:
10987b885f0eSAlex Deucher 		case CHIP_VEGA12:
10997b885f0eSAlex Deucher 		case CHIP_VEGA20:
11007b885f0eSAlex Deucher 		default:
11017b885f0eSAlex Deucher 			viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
11027b885f0eSAlex Deucher 			size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
11037b885f0eSAlex Deucher 				REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) *
11047b885f0eSAlex Deucher 				4);
11057b885f0eSAlex Deucher 			break;
11067b885f0eSAlex Deucher 		}
11077b885f0eSAlex Deucher 	}
11087b885f0eSAlex Deucher 
11097b885f0eSAlex Deucher 	return size;
11107b885f0eSAlex Deucher }
11117b885f0eSAlex Deucher 
1112132f34e4SChristian König static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
1113132f34e4SChristian König 	.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
1114ea930000SAlex Sierra 	.flush_gpu_tlb_pasid = gmc_v9_0_flush_gpu_tlb_pasid,
11159096d6e5SChristian König 	.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
1116c633c00bSChristian König 	.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
111771776b6dSChristian König 	.map_mtype = gmc_v9_0_map_mtype,
1118cbfae36cSChristian König 	.get_vm_pde = gmc_v9_0_get_vm_pde,
11197b885f0eSAlex Deucher 	.get_vm_pte = gmc_v9_0_get_vm_pte,
11207b885f0eSAlex Deucher 	.get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size,
1121e60f8db5SAlex Xie };
1122e60f8db5SAlex Xie 
1123132f34e4SChristian König static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
1124e60f8db5SAlex Xie {
1125132f34e4SChristian König 	adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs;
1126e60f8db5SAlex Xie }
1127e60f8db5SAlex Xie 
11285b6b35aaSHawking Zhang static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
11295b6b35aaSHawking Zhang {
11305b6b35aaSHawking Zhang 	switch (adev->asic_type) {
1131e7da754bSMonk Liu 	case CHIP_VEGA10:
1132e7da754bSMonk Liu 		adev->umc.funcs = &umc_v6_0_funcs;
1133e7da754bSMonk Liu 		break;
11345b6b35aaSHawking Zhang 	case CHIP_VEGA20:
11353aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
11363aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
11373aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
11384cf781c2SJohn Clements 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20;
11394cf781c2SJohn Clements 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
11404cf781c2SJohn Clements 		adev->umc.funcs = &umc_v6_1_funcs;
11414cf781c2SJohn Clements 		break;
11429e612c11SHawking Zhang 	case CHIP_ARCTURUS:
11433aacf4eaSTao Zhou 		adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
11443aacf4eaSTao Zhou 		adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
11453aacf4eaSTao Zhou 		adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
11464cf781c2SJohn Clements 		adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT;
11473aacf4eaSTao Zhou 		adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
1148045c0216STao Zhou 		adev->umc.funcs = &umc_v6_1_funcs;
11495b6b35aaSHawking Zhang 		break;
11505b6b35aaSHawking Zhang 	default:
11515b6b35aaSHawking Zhang 		break;
11525b6b35aaSHawking Zhang 	}
11535b6b35aaSHawking Zhang }
11545b6b35aaSHawking Zhang 
11553d093da0STao Zhou static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
11563d093da0STao Zhou {
11573d093da0STao Zhou 	switch (adev->asic_type) {
1158f6c3623bSDennis Li 	case CHIP_ARCTURUS:
1159f6c3623bSDennis Li 		adev->mmhub.funcs = &mmhub_v9_4_funcs;
1160f6c3623bSDennis Li 		break;
11613d093da0STao Zhou 	default:
11629fb1506eSOak Zeng 		adev->mmhub.funcs = &mmhub_v1_0_funcs;
11633d093da0STao Zhou 		break;
11643d093da0STao Zhou 	}
11653d093da0STao Zhou }
11663d093da0STao Zhou 
11678ffff9b4SOak Zeng static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
11688ffff9b4SOak Zeng {
11698ffff9b4SOak Zeng 	switch (adev->asic_type) {
11708ffff9b4SOak Zeng 	case CHIP_ARCTURUS:
11718ffff9b4SOak Zeng 	case CHIP_VEGA20:
11728ffff9b4SOak Zeng 		adev->gfxhub.funcs = &gfxhub_v1_1_funcs;
11738ffff9b4SOak Zeng 		break;
11748ffff9b4SOak Zeng 	default:
11758ffff9b4SOak Zeng 		adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
11768ffff9b4SOak Zeng 		break;
11778ffff9b4SOak Zeng 	}
11788ffff9b4SOak Zeng }
11798ffff9b4SOak Zeng 
1180e60f8db5SAlex Xie static int gmc_v9_0_early_init(void *handle)
1181e60f8db5SAlex Xie {
1182e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1183e60f8db5SAlex Xie 
1184132f34e4SChristian König 	gmc_v9_0_set_gmc_funcs(adev);
1185e60f8db5SAlex Xie 	gmc_v9_0_set_irq_funcs(adev);
11865b6b35aaSHawking Zhang 	gmc_v9_0_set_umc_funcs(adev);
11873d093da0STao Zhou 	gmc_v9_0_set_mmhub_funcs(adev);
11888ffff9b4SOak Zeng 	gmc_v9_0_set_gfxhub_funcs(adev);
1189e60f8db5SAlex Xie 
1190770d13b1SChristian König 	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
1191770d13b1SChristian König 	adev->gmc.shared_aperture_end =
1192770d13b1SChristian König 		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
1193bfa8eea2SFlora Cui 	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
1194770d13b1SChristian König 	adev->gmc.private_aperture_end =
1195770d13b1SChristian König 		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
1196a7ea6548SAlex Deucher 
1197e60f8db5SAlex Xie 	return 0;
1198e60f8db5SAlex Xie }
1199e60f8db5SAlex Xie 
1200e60f8db5SAlex Xie static int gmc_v9_0_late_init(void *handle)
1201e60f8db5SAlex Xie {
1202e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1203c5b6e585STao Zhou 	int r;
12044789c463SChristian König 
1205cd2b5623SAlex Deucher 	amdgpu_bo_late_init(adev);
12066f752ec2SAndrey Grodzovsky 
1207bdbe90f0SAlex Deucher 	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
1208c713a461SEvan Quan 	if (r)
1209c713a461SEvan Quan 		return r;
12104a20300bSGuchun Chen 
12114a20300bSGuchun Chen 	/*
12124a20300bSGuchun Chen 	 * Workaround performance drop issue with VBIOS enables partial
12134a20300bSGuchun Chen 	 * writes, while disables HBM ECC for vega10.
12144a20300bSGuchun Chen 	 */
121588474ccaSGuchun Chen 	if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
12164a20300bSGuchun Chen 		if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
1217bdf84a80SJoseph Greathouse 			if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
1218bdf84a80SJoseph Greathouse 				adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
12194a20300bSGuchun Chen 		}
1220f49ea9f8SHawking Zhang 	}
122102bab923SDavid Panariti 
1222fe5211f1SHawking Zhang 	if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
1223fe5211f1SHawking Zhang 		adev->mmhub.funcs->reset_ras_error_count(adev);
1224fe5211f1SHawking Zhang 
1225ba083492STao Zhou 	r = amdgpu_gmc_ras_late_init(adev);
1226791c4769Sxinhui pan 	if (r)
1227e60f8db5SAlex Xie 		return r;
1228e60f8db5SAlex Xie 
1229770d13b1SChristian König 	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
1230e60f8db5SAlex Xie }
1231e60f8db5SAlex Xie 
1232e60f8db5SAlex Xie static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
1233770d13b1SChristian König 					struct amdgpu_gmc *mc)
1234e60f8db5SAlex Xie {
1235e60f8db5SAlex Xie 	u64 base = 0;
12369d4f837aSFrank.Min 
12379fb1506eSOak Zeng 	if (!amdgpu_sriov_vf(adev))
12389fb1506eSOak Zeng 		base = adev->mmhub.funcs->get_fb_location(adev);
12399d4f837aSFrank.Min 
12406fdd68b1SAlex Deucher 	/* add the xgmi offset of the physical node */
12416fdd68b1SAlex Deucher 	base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
124283afe835SOak Zeng 	amdgpu_gmc_vram_location(adev, mc, base);
1243961c75cfSChristian König 	amdgpu_gmc_gart_location(adev, mc);
1244c3e1b43cSChristian König 	amdgpu_gmc_agp_location(adev, mc);
1245e60f8db5SAlex Xie 	/* base offset of vram pages */
12468ffff9b4SOak Zeng 	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
12476fdd68b1SAlex Deucher 
12486fdd68b1SAlex Deucher 	/* XXX: add the xgmi offset of the physical node? */
12496fdd68b1SAlex Deucher 	adev->vm_manager.vram_base_offset +=
12506fdd68b1SAlex Deucher 		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
1251e60f8db5SAlex Xie }
1252e60f8db5SAlex Xie 
/**
 * gmc_v9_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	/* get_memsize() reports MiB; convert to bytes */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;

	/* Only dGPUs have a resizable FB BAR */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	/* On x86-64 APUs, carveout VRAM is directly CPU-accessible, so
	 * expose the whole of it through the MC FB offset instead of the
	 * (possibly smaller) PCI BAR.
	 */
	if (adev->flags & AMD_IS_APU) {
		adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif
	/* In case the PCI BAR is larger than the actual amount of vram */
	adev->gmc.visible_vram_size = adev->gmc.aper_size;
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size; -1 means "pick a per-ASIC default".
	 * Note the default: label sits mid-list on purpose so unknown
	 * ASICs share the 512 MiB default with the Vega family.
	 */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_VEGA10:  /* all engines support GPUVM */
		case CHIP_VEGA12:  /* all engines support GPUVM */
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		case CHIP_RAVEN:   /* DCE SG support */
		case CHIP_RENOIR:
			adev->gmc.gart_size = 1024ULL << 20;
			break;
		}
	} else {
		/* user override, given in MiB */
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v9_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}
1313e60f8db5SAlex Xie 
1314e60f8db5SAlex Xie static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
1315e60f8db5SAlex Xie {
1316e60f8db5SAlex Xie 	int r;
1317e60f8db5SAlex Xie 
13181123b989SChristian König 	if (adev->gart.bo) {
1319e60f8db5SAlex Xie 		WARN(1, "VEGA10 PCIE GART already initialized\n");
1320e60f8db5SAlex Xie 		return 0;
1321e60f8db5SAlex Xie 	}
1322e60f8db5SAlex Xie 	/* Initialize common gart structure */
1323e60f8db5SAlex Xie 	r = amdgpu_gart_init(adev);
1324e60f8db5SAlex Xie 	if (r)
1325e60f8db5SAlex Xie 		return r;
1326e60f8db5SAlex Xie 	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
13277596ab68SHawking Zhang 	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
1328e60f8db5SAlex Xie 				 AMDGPU_PTE_EXECUTABLE;
1329e60f8db5SAlex Xie 	return amdgpu_gart_table_vram_alloc(adev);
1330e60f8db5SAlex Xie }
1331e60f8db5SAlex Xie 
1332b0a2db9bSAlex Deucher /**
1333b0a2db9bSAlex Deucher  * gmc_v9_0_save_registers - saves regs
1334b0a2db9bSAlex Deucher  *
1335b0a2db9bSAlex Deucher  * @adev: amdgpu_device pointer
1336b0a2db9bSAlex Deucher  *
1337b0a2db9bSAlex Deucher  * This saves potential register values that should be
1338b0a2db9bSAlex Deucher  * restored upon resume
1339b0a2db9bSAlex Deucher  */
1340b0a2db9bSAlex Deucher static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
1341ebdef28eSAlex Deucher {
1342b0a2db9bSAlex Deucher 	if (adev->asic_type == CHIP_RAVEN)
1343b0a2db9bSAlex Deucher 		adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
1344ebdef28eSAlex Deucher }
1345ebdef28eSAlex Deucher 
1346e60f8db5SAlex Xie static int gmc_v9_0_sw_init(void *handle)
1347e60f8db5SAlex Xie {
1348ad02e08eSOri Messinger 	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
1349e60f8db5SAlex Xie 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1350e60f8db5SAlex Xie 
13518ffff9b4SOak Zeng 	adev->gfxhub.funcs->init(adev);
13529fb1506eSOak Zeng 
13539fb1506eSOak Zeng 	adev->mmhub.funcs->init(adev);
1354e60f8db5SAlex Xie 
1355770d13b1SChristian König 	spin_lock_init(&adev->gmc.invalidate_lock);
1356e60f8db5SAlex Xie 
1357ad02e08eSOri Messinger 	r = amdgpu_atomfirmware_get_vram_info(adev,
1358ad02e08eSOri Messinger 		&vram_width, &vram_type, &vram_vendor);
1359631cdbd2SAlex Deucher 	if (amdgpu_sriov_vf(adev))
1360631cdbd2SAlex Deucher 		/* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN,
1361631cdbd2SAlex Deucher 		 * and DF related registers is not readable, seems hardcord is the
1362631cdbd2SAlex Deucher 		 * only way to set the correct vram_width
1363631cdbd2SAlex Deucher 		 */
1364631cdbd2SAlex Deucher 		adev->gmc.vram_width = 2048;
1365631cdbd2SAlex Deucher 	else if (amdgpu_emu_mode != 1)
1366631cdbd2SAlex Deucher 		adev->gmc.vram_width = vram_width;
1367631cdbd2SAlex Deucher 
1368631cdbd2SAlex Deucher 	if (!adev->gmc.vram_width) {
1369631cdbd2SAlex Deucher 		int chansize, numchan;
1370631cdbd2SAlex Deucher 
1371631cdbd2SAlex Deucher 		/* hbm memory channel size */
1372631cdbd2SAlex Deucher 		if (adev->flags & AMD_IS_APU)
1373631cdbd2SAlex Deucher 			chansize = 64;
1374631cdbd2SAlex Deucher 		else
1375631cdbd2SAlex Deucher 			chansize = 128;
1376631cdbd2SAlex Deucher 
1377bdf84a80SJoseph Greathouse 		numchan = adev->df.funcs->get_hbm_channel_number(adev);
1378631cdbd2SAlex Deucher 		adev->gmc.vram_width = numchan * chansize;
1379631cdbd2SAlex Deucher 	}
1380631cdbd2SAlex Deucher 
1381631cdbd2SAlex Deucher 	adev->gmc.vram_type = vram_type;
1382ad02e08eSOri Messinger 	adev->gmc.vram_vendor = vram_vendor;
1383e60f8db5SAlex Xie 	switch (adev->asic_type) {
1384e60f8db5SAlex Xie 	case CHIP_RAVEN:
13851daa2bfaSLe Ma 		adev->num_vmhubs = 2;
13861daa2bfaSLe Ma 
13876a42fd6fSChristian König 		if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
1388f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
13896a42fd6fSChristian König 		} else {
13906a42fd6fSChristian König 			/* vm_size is 128TB + 512GB for legacy 3-level page support */
13916a42fd6fSChristian König 			amdgpu_vm_adjust_size(adev, 128 * 1024 + 512, 9, 2, 48);
1392770d13b1SChristian König 			adev->gmc.translate_further =
13936a42fd6fSChristian König 				adev->vm_manager.num_level > 1;
13946a42fd6fSChristian König 		}
1395e60f8db5SAlex Xie 		break;
1396e60f8db5SAlex Xie 	case CHIP_VEGA10:
1397273a14cdSAlex Deucher 	case CHIP_VEGA12:
1398d96b428cSFeifei Xu 	case CHIP_VEGA20:
13998787ee01SHuang Rui 	case CHIP_RENOIR:
14001daa2bfaSLe Ma 		adev->num_vmhubs = 2;
14011daa2bfaSLe Ma 
14028787ee01SHuang Rui 
1403e60f8db5SAlex Xie 		/*
1404e60f8db5SAlex Xie 		 * To fulfill 4-level page support,
1405e60f8db5SAlex Xie 		 * vm size is 256TB (48bit), maximum size of Vega10,
1406e60f8db5SAlex Xie 		 * block size 512 (9bit)
1407e60f8db5SAlex Xie 		 */
1408cdba61daSwentalou 		/* sriov restrict max_pfn below AMDGPU_GMC_HOLE */
1409cdba61daSwentalou 		if (amdgpu_sriov_vf(adev))
1410cdba61daSwentalou 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
1411cdba61daSwentalou 		else
1412f3368128SChristian König 			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
1413e60f8db5SAlex Xie 		break;
14143de2ff5dSLe Ma 	case CHIP_ARCTURUS:
1415c8a6e2a3SLe Ma 		adev->num_vmhubs = 3;
1416c8a6e2a3SLe Ma 
14173de2ff5dSLe Ma 		/* Keep the vm size same with Vega20 */
14183de2ff5dSLe Ma 		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
14193de2ff5dSLe Ma 		break;
1420e60f8db5SAlex Xie 	default:
1421e60f8db5SAlex Xie 		break;
1422e60f8db5SAlex Xie 	}
1423e60f8db5SAlex Xie 
1424e60f8db5SAlex Xie 	/* This interrupt is VMC page fault.*/
142544a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC, VMC_1_0__SRCID__VM_FAULT,
1426770d13b1SChristian König 				&adev->gmc.vm_fault);
142730da7bb1SChristian König 	if (r)
142830da7bb1SChristian König 		return r;
142930da7bb1SChristian König 
14307d19b15fSLe Ma 	if (adev->asic_type == CHIP_ARCTURUS) {
14317d19b15fSLe Ma 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
14327d19b15fSLe Ma 					&adev->gmc.vm_fault);
14337d19b15fSLe Ma 		if (r)
14347d19b15fSLe Ma 			return r;
14357d19b15fSLe Ma 	}
14367d19b15fSLe Ma 
143744a99b65SAndrey Grodzovsky 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2, UTCL2_1_0__SRCID__FAULT,
1438770d13b1SChristian König 				&adev->gmc.vm_fault);
1439e60f8db5SAlex Xie 
1440e60f8db5SAlex Xie 	if (r)
1441e60f8db5SAlex Xie 		return r;
1442e60f8db5SAlex Xie 
14432ee9403eSZhigang Luo 	if (!amdgpu_sriov_vf(adev)) {
1444791c4769Sxinhui pan 		/* interrupt sent to DF. */
1445791c4769Sxinhui pan 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
1446791c4769Sxinhui pan 				      &adev->gmc.ecc_irq);
1447791c4769Sxinhui pan 		if (r)
1448791c4769Sxinhui pan 			return r;
14492ee9403eSZhigang Luo 	}
1450791c4769Sxinhui pan 
1451e60f8db5SAlex Xie 	/* Set the internal MC address mask
1452e60f8db5SAlex Xie 	 * This is the max address of the GPU's
1453e60f8db5SAlex Xie 	 * internal address space.
1454e60f8db5SAlex Xie 	 */
1455770d13b1SChristian König 	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
1456e60f8db5SAlex Xie 
1457244511f3SChristoph Hellwig 	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
1458e60f8db5SAlex Xie 	if (r) {
1459e60f8db5SAlex Xie 		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
1460244511f3SChristoph Hellwig 		return r;
1461e60f8db5SAlex Xie 	}
1462244511f3SChristoph Hellwig 	adev->need_swiotlb = drm_need_swiotlb(44);
1463e60f8db5SAlex Xie 
146447622ba0SAlex Deucher 	if (adev->gmc.xgmi.supported) {
14658ffff9b4SOak Zeng 		r = adev->gfxhub.funcs->get_xgmi_info(adev);
1466bf0a60b7SAlex Deucher 		if (r)
1467bf0a60b7SAlex Deucher 			return r;
1468bf0a60b7SAlex Deucher 	}
1469bf0a60b7SAlex Deucher 
1470e60f8db5SAlex Xie 	r = gmc_v9_0_mc_init(adev);
1471e60f8db5SAlex Xie 	if (r)
1472e60f8db5SAlex Xie 		return r;
1473e60f8db5SAlex Xie 
14747b885f0eSAlex Deucher 	amdgpu_gmc_get_vbios_allocations(adev);
1475ebdef28eSAlex Deucher 
1476e60f8db5SAlex Xie 	/* Memory manager */
1477e60f8db5SAlex Xie 	r = amdgpu_bo_init(adev);
1478e60f8db5SAlex Xie 	if (r)
1479e60f8db5SAlex Xie 		return r;
1480e60f8db5SAlex Xie 
1481e60f8db5SAlex Xie 	r = gmc_v9_0_gart_init(adev);
1482e60f8db5SAlex Xie 	if (r)
1483e60f8db5SAlex Xie 		return r;
1484e60f8db5SAlex Xie 
148505ec3edaSChristian König 	/*
148605ec3edaSChristian König 	 * number of VMs
148705ec3edaSChristian König 	 * VMID 0 is reserved for System
148881659b20SFelix Kuehling 	 * amdgpu graphics/compute will use VMIDs 1..n-1
148981659b20SFelix Kuehling 	 * amdkfd will use VMIDs n..15
149081659b20SFelix Kuehling 	 *
149181659b20SFelix Kuehling 	 * The first KFD VMID is 8 for GPUs with graphics, 3 for
149281659b20SFelix Kuehling 	 * compute-only GPUs. On compute-only GPUs that leaves 2 VMIDs
149381659b20SFelix Kuehling 	 * for video processing.
149405ec3edaSChristian König 	 */
149581659b20SFelix Kuehling 	adev->vm_manager.first_kfd_vmid =
149681659b20SFelix Kuehling 		adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
149705ec3edaSChristian König 
149805ec3edaSChristian König 	amdgpu_vm_manager_init(adev);
149905ec3edaSChristian König 
1500b0a2db9bSAlex Deucher 	gmc_v9_0_save_registers(adev);
1501b0a2db9bSAlex Deucher 
150205ec3edaSChristian König 	return 0;
1503e60f8db5SAlex Xie }
1504e60f8db5SAlex Xie 
/* Tear down everything sw_init set up. The order is load-bearing:
 * RAS and GEM objects go first, then the VM manager, then the GART
 * table BO, then the buffer-object layer, and finally the common
 * GART structures.
 */
static int gmc_v9_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gmc_ras_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_gart_fini(adev);

	return 0;
}
1518e60f8db5SAlex Xie 
/* Program per-ASIC "golden" register settings for the MMHUB/ATHUB.
 * Note the deliberate fallthrough: bare-metal Vega10 shares the
 * Vega20 sequences, while SR-IOV Vega10 skips them entirely.
 */
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{

	switch (adev->asic_type) {
	case CHIP_VEGA10:
		if (amdgpu_sriov_vf(adev))
			break;
		fallthrough;
	case CHIP_VEGA20:
		soc15_program_register_sequence(adev,
						golden_settings_mmhub_1_0_0,
						ARRAY_SIZE(golden_settings_mmhub_1_0_0));
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	case CHIP_VEGA12:
		/* no golden settings needed */
		break;
	case CHIP_RAVEN:
		/* TODO for renoir */
		soc15_program_register_sequence(adev,
						golden_settings_athub_1_0_0,
						ARRAY_SIZE(golden_settings_athub_1_0_0));
		break;
	default:
		break;
	}
}
1547e60f8db5SAlex Xie 
1548e60f8db5SAlex Xie /**
1549c2ecd79bSShirish S  * gmc_v9_0_restore_registers - restores regs
1550c2ecd79bSShirish S  *
1551c2ecd79bSShirish S  * @adev: amdgpu_device pointer
1552c2ecd79bSShirish S  *
1553c2ecd79bSShirish S  * This restores register values, saved at suspend.
1554c2ecd79bSShirish S  */
1555b0a2db9bSAlex Deucher void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
1556c2ecd79bSShirish S {
15570eaa8012SShirish S 	if (adev->asic_type == CHIP_RAVEN) {
1558f8646661SAlex Deucher 		WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
15590eaa8012SShirish S 		WARN_ON(adev->gmc.sdpif_register !=
15600eaa8012SShirish S 			RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
15610eaa8012SShirish S 	}
1562c2ecd79bSShirish S }
1563c2ecd79bSShirish S 
1564c2ecd79bSShirish S /**
1565e60f8db5SAlex Xie  * gmc_v9_0_gart_enable - gart enable
1566e60f8db5SAlex Xie  *
1567e60f8db5SAlex Xie  * @adev: amdgpu_device pointer
1568e60f8db5SAlex Xie  */
1569e60f8db5SAlex Xie static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
1570e60f8db5SAlex Xie {
1571cb1545f7SOak Zeng 	int r;
1572e60f8db5SAlex Xie 
15731123b989SChristian König 	if (adev->gart.bo == NULL) {
1574e60f8db5SAlex Xie 		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
1575e60f8db5SAlex Xie 		return -EINVAL;
1576e60f8db5SAlex Xie 	}
1577ce1b1b66SMonk Liu 	r = amdgpu_gart_table_vram_pin(adev);
1578ce1b1b66SMonk Liu 	if (r)
1579ce1b1b66SMonk Liu 		return r;
1580e60f8db5SAlex Xie 
15818ffff9b4SOak Zeng 	r = adev->gfxhub.funcs->gart_enable(adev);
1582e60f8db5SAlex Xie 	if (r)
1583e60f8db5SAlex Xie 		return r;
1584e60f8db5SAlex Xie 
15859fb1506eSOak Zeng 	r = adev->mmhub.funcs->gart_enable(adev);
1586e60f8db5SAlex Xie 	if (r)
1587e60f8db5SAlex Xie 		return r;
1588e60f8db5SAlex Xie 
1589cb1545f7SOak Zeng 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
1590cb1545f7SOak Zeng 		 (unsigned)(adev->gmc.gart_size >> 20),
1591cb1545f7SOak Zeng 		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
1592cb1545f7SOak Zeng 	adev->gart.ready = true;
1593cb1545f7SOak Zeng 	return 0;
1594cb1545f7SOak Zeng }
1595cb1545f7SOak Zeng 
/* Hardware-side GMC bring-up: golden registers, VGA lockout, HDP
 * programming, fault handling defaults, TLB flush and GART enable.
 * Statement order here mirrors the required hardware sequence —
 * do not reorder.
 */
static int gmc_v9_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;
	int r, i;
	u32 tmp;

	/* The sequence of these two function calls matters.*/
	gmc_v9_0_init_golden_registers(adev);

	if (adev->mode_info.num_crtc) {
		if (adev->asic_type != CHIP_ARCTURUS) {
			/* Lockout access through VGA aperture*/
			WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);

			/* disable VGA render */
			WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		}
	}

	amdgpu_device_program_register_sequence(adev,
						golden_settings_vega10_hdp,
						ARRAY_SIZE(golden_settings_vega10_hdp));

	if (adev->mmhub.funcs->update_power_gating)
		adev->mmhub.funcs->update_power_gating(adev, true);

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
		break;
	default:
		break;
	}

	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);

	/* read-modify-write of HOST_PATH_CNTL with no field change;
	 * presumably the write itself latches the value — TODO confirm
	 */
	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
	WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));

	/* After HDP is initialized, flush HDP.*/
	adev->nbio.funcs->hdp_flush(adev, NULL);

	/* value == true: retry faults; false: stop on first fault */
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	if (!amdgpu_sriov_vf(adev)) {
		adev->gfxhub.funcs->set_fault_enable_default(adev, value);
		adev->mmhub.funcs->set_fault_enable_default(adev, value);
	}
	/* flush VMID 0 on every vmhub */
	for (i = 0; i < adev->num_vmhubs; ++i)
		gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);

	if (adev->umc.funcs && adev->umc.funcs->init_registers)
		adev->umc.funcs->init_registers(adev);

	r = gmc_v9_0_gart_enable(adev);

	return r;
}
1661e60f8db5SAlex Xie 
/**
 * gmc_v9_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page table.
 */
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
	/* disable on both hubs before releasing the table pin */
	adev->gfxhub.funcs->gart_disable(adev);
	adev->mmhub.funcs->gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}
1675e60f8db5SAlex Xie 
/* Hardware-side GMC teardown: release interrupt references and
 * disable the GART. Skipped entirely under SR-IOV, where the host
 * owns the GMC registers.
 */
static int gmc_v9_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v9_0_gart_disable(adev);

	return 0;
}
1692e60f8db5SAlex Xie 
/* Suspend is simply a full hardware teardown for the GMC block. */
static int gmc_v9_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gmc_v9_0_hw_fini(adev);
}
1699e60f8db5SAlex Xie 
/* Resume: re-run the hardware init sequence and invalidate all VMIDs
 * so stale page-table mappings from before the suspend are dropped.
 */
static int gmc_v9_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = gmc_v9_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);
	return 0;
}
1713e60f8db5SAlex Xie 
/* IP-block idle query; the MC has no busy state to report on GMC v9. */
static bool gmc_v9_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v9.*/
	return true;
}
1719e60f8db5SAlex Xie 
/* IP-block wait-for-idle; a no-op since the MC is always idle here. */
static int gmc_v9_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v9.*/
	return 0;
}
1725e60f8db5SAlex Xie 
/* IP-block soft reset; intentionally a stub on GMC v9. */
static int gmc_v9_0_soft_reset(void *handle)
{
	/* XXX for emulation.*/
	return 0;
}
1731e60f8db5SAlex Xie 
/* Forward the clockgating request to the MMHUB and ATHUB blocks. */
static int gmc_v9_0_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->set_clockgating(adev, state);

	athub_v1_0_set_clockgating(adev, state);

	return 0;
}
1743e60f8db5SAlex Xie 
/* Collect clockgating status flags from the MMHUB and ATHUB blocks. */
static void gmc_v9_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->mmhub.funcs->get_clockgating(adev, flags);

	athub_v1_0_get_clockgating(adev, flags);
}
175213052be5SHuang Rui 
/* Powergating is not controlled through this callback on GMC v9. */
static int gmc_v9_0_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
1758e60f8db5SAlex Xie 
/* IP-block callback table wiring the GMC v9 handlers into the common
 * amdgpu IP framework.
 */
const struct amd_ip_funcs gmc_v9_0_ip_funcs = {
	.name = "gmc_v9_0",
	.early_init = gmc_v9_0_early_init,
	.late_init = gmc_v9_0_late_init,
	.sw_init = gmc_v9_0_sw_init,
	.sw_fini = gmc_v9_0_sw_fini,
	.hw_init = gmc_v9_0_hw_init,
	.hw_fini = gmc_v9_0_hw_fini,
	.suspend = gmc_v9_0_suspend,
	.resume = gmc_v9_0_resume,
	.is_idle = gmc_v9_0_is_idle,
	.wait_for_idle = gmc_v9_0_wait_for_idle,
	.soft_reset = gmc_v9_0_soft_reset,
	.set_clockgating_state = gmc_v9_0_set_clockgating_state,
	.set_powergating_state = gmc_v9_0_set_powergating_state,
	.get_clockgating_state = gmc_v9_0_get_clockgating_state,
};
1776e60f8db5SAlex Xie 
/* Version descriptor used when registering this GMC IP block (v9.0.0). */
const struct amdgpu_ip_block_version gmc_v9_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 9,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v9_0_ip_funcs,
};
1785