xref: /openbmc/linux/drivers/fpga/dfl-fme-error.c (revision e82c878d)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/bitfield.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

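/*
 * Register offsets below are relative to the MMIO base of the FME
 * global error reporting private feature (FME_FEATURE_ID_GLOBAL_ERR),
 * which is resolved at runtime via dfl_get_feature_ioaddr_by_id().
 * Each error status register is paired with a mask register that is
 * used below to gate reporting while errors are cleared and across
 * feature init/uinit.
 */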
#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)

static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

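/*
 * Clearing protocol used by all writable error files: mask the whole
 * register, verify that the value written from user space matches the
 * currently logged errors, write that value back to clear them (the
 * status registers behave as write-1-to-clear), then unmask. A
 * mismatch returns -EINVAL so stale error values are never cleared
 * blindly.
 */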
static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(ERROR_MASK, base + PCIE0_ERROR_MASK);

	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);

static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

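/* pcie1_errors follows the same verify-then-clear protocol as pcie0_errors. */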
static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(ERROR_MASK, base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

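/*
 * RAS non-fatal and catastrophic/fatal error status registers are
 * exposed read-only; no clearing is offered through sysfs for them.
 */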
static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);

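/*
 * Only the low three bits of RAS_ERROR_INJECT (INJECT_ERROR_MASK) are
 * writable; they select the type of error injected for testing. Any
 * value with other bits set is rejected with -EINVAL.
 */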
static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);

static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

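/*
 * fme_errors uses the same verify-then-clear protocol as the PCIe
 * error files, with one twist: on feature revision 0 hardware the
 * MBP_ERROR bit must remain masked, so the mask register is restored
 * to MBP_ERROR instead of being fully cleared (see also fme_err_mask()).
 */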
static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(ERROR_MASK, base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: keep MBP_ERROR masked when the feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

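/*
 * FME_FIRST_ERROR and FME_NEXT_ERROR latch the first and the second
 * error detected by the hardware, preserving ordering information
 * that the cumulative FME_ERROR status register cannot provide.
 */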
static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature
	 * is enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};

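/*
 * Mask or unmask every error reporting source at once. Called with
 * mask == false at feature init to enable reporting and with
 * mask == true at uninit to quiesce the hardware before teardown.
 */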
static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}

static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

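/*
 * Match table: the DFL enumeration core uses this id to bind
 * fme_global_err_ops to the global error private feature.
 */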
const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
};
360