// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"

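/*
 * Register offsets below are relative to the MMIO base of the FME global
 * error private feature (FME_FEATURE_ID_GLOBAL_ERR), as returned by
 * dfl_get_feature_ioaddr_by_id().
 */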
#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)

static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

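/*
 * Writing to pcie0_errors (and likewise pcie1_errors and fme_errors below)
 * clears the logged errors: the write succeeds only if the written value
 * matches the currently logged value, otherwise -EINVAL is returned.
 * Error reporting is masked while the clear is in progress so that new
 * events are not recorded mid-operation.
 */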
static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);

static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);

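/*
 * RAS_ERROR_INJECT allows user space to inject errors (e.g. for
 * validation). Only the low three bits (INJECT_ERROR_MASK) select an
 * injection type; writes with any other bit set are rejected.
 */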
static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);

static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: disable MBP_ERROR if feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

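/* sysfs attributes exposed under the "errors" group of the FME device */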
static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature
	 * is enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};

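/*
 * Mask or unmask error reporting for all error registers of the global
 * error feature. MBP_ERROR stays masked on revision 0 hardware, matching
 * the workaround in fme_errors_store().
 */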
static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}

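/* Error reporting is unmasked on feature init and masked again on uinit. */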
static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

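/*
 * Dispatch FME error interrupt ioctls to the common DFL helpers: user
 * space can query the number of error IRQs and set their triggers.
 */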
static long
fme_global_error_ioctl(struct platform_device *pdev,
		       struct dfl_feature *feature,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_FME_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		return -ENODEV;
	}
}

const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
	.ioctl = fme_global_error_ioctl,
};