xref: /openbmc/linux/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c (revision 67ff4a72)
/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"

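/* STATUST0 register offsets (SMN space) of the MP0, MP1 and MPIO MCA banks */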
#define smnMCMP0_STATUST0	0x03830408
#define smnMCMP1_STATUST0	0x03b30408
#define smnMCMPIO_STATUST0	0x0c930408

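/*
 * Each MCA bank (MP0, MP1, MPIO) gets the same treatment below: a
 * query_ras_error_count() callback that reads the bank's STATUST0
 * register, late_init/fini hooks that forward to the common amdgpu_mca
 * helpers, and an amdgpu_mca_ras_block descriptor tying them together.
 */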
static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
					       void *ras_error_status)
{
	amdgpu_mca_query_ras_error_count(adev,
					 smnMCMP0_STATUST0,
					 ras_error_status);
}

static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev, void *ras_info)
{
	return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
}

static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
{
	amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
}

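/*
 * Match helper shared by all three MCA sub-blocks: a block object
 * matches when both the RAS block type and the sub-block index are
 * identical.
 */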
static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj,
				enum amdgpu_ras_block block, uint32_t sub_block_index)
{
	if (!block_obj)
		return -EINVAL;

	if ((block_obj->block == block) &&
		(block_obj->sub_block_index == sub_block_index)) {
		return 0;
	}

	return -EINVAL;
}

const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = {
	.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
	.query_ras_error_address = NULL,
};

struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = {
	.ras_block = {
		.block = AMDGPU_RAS_BLOCK__MCA,
		.sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0,
		.name = "mp0",
		.hw_ops = &mca_v3_0_mp0_hw_ops,
		.ras_block_match = mca_v3_0_ras_block_match,
		.ras_late_init = mca_v3_0_mp0_ras_late_init,
		.ras_fini = mca_v3_0_mp0_ras_fini,
	},
};

static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
					       void *ras_error_status)
{
	amdgpu_mca_query_ras_error_count(adev,
					 smnMCMP1_STATUST0,
					 ras_error_status);
}

static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev, void *ras_info)
{
	return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1);
}

static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
{
	amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
}

const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = {
	.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
	.query_ras_error_address = NULL,
};

struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = {
	.ras_block = {
		.block = AMDGPU_RAS_BLOCK__MCA,
		.sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1,
		.name = "mp1",
		.hw_ops = &mca_v3_0_mp1_hw_ops,
		.ras_block_match = mca_v3_0_ras_block_match,
		.ras_late_init = mca_v3_0_mp1_ras_late_init,
		.ras_fini = mca_v3_0_mp1_ras_fini,
	},
};

static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
						void *ras_error_status)
{
	amdgpu_mca_query_ras_error_count(adev,
					 smnMCMPIO_STATUST0,
					 ras_error_status);
}

static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev, void *ras_info)
{
	return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio);
}

static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
{
	amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
}

const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = {
	.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
	.query_ras_error_address = NULL,
};

struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = {
	.ras_block = {
		.block = AMDGPU_RAS_BLOCK__MCA,
		.sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO,
		.name = "mpio",
		.hw_ops = &mca_v3_0_mpio_hw_ops,
		.ras_block_match = mca_v3_0_ras_block_match,
		.ras_late_init = mca_v3_0_mpio_ras_late_init,
		.ras_fini = mca_v3_0_mpio_ras_fini,
	},
};

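/*
 * Wire the MP0/MP1/MPIO RAS descriptors into adev->mca and register
 * each one with the RAS core.
 */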
static void mca_v3_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_mca *mca = &adev->mca;

	mca->mp0.ras = &mca_v3_0_mp0_ras;
	mca->mp1.ras = &mca_v3_0_mp1_ras;
	mca->mpio.ras = &mca_v3_0_mpio_ras;
	amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block);
	amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block);
	amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block);
}

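/* MCA v3.0 dispatch table; only an init hook is populated. */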
const struct amdgpu_mca_funcs mca_v3_0_funcs = {
	.init = mca_v3_0_init,
};