/*
 * Copyright (C) 2001  Dave Engebretsen & Todd Inglett IBM Corporation.
 * Copyright 2001-2012 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef _POWERPC_EEH_H
#define _POWERPC_EEH_H
#ifdef __KERNEL__

#include <linux/init.h>
#include <linux/list.h>
#include <linux/string.h>

struct pci_dev;
struct pci_bus;
struct device_node;

#ifdef CONFIG_EEH

/*
 * This struct is used to track the EEH state of the associated
 * PCI device node or PCI device. In the future it may also
 * represent a PE, so that EEH devices can form a tree of their
 * own alongside the existing tree of PCI buses and PCI devices.
 */
#define EEH_MODE_SUPPORTED	(1<<0)	/* EEH supported on the device	*/
#define EEH_MODE_NOCHECK	(1<<1)	/* EEH check should be skipped	*/
#define EEH_MODE_ISOLATED	(1<<2)	/* The device has been isolated	*/
#define EEH_MODE_RECOVERING	(1<<3)	/* Recovering the device	*/
#define EEH_MODE_IRQ_DISABLED	(1<<4)	/* Interrupt disabled		*/

struct eeh_dev {
	int mode;			/* EEH mode			*/
	int class_code;			/* Class code of the device	*/
	int config_addr;		/* Config address		*/
	int pe_config_addr;		/* PE config address		*/
	int check_count;		/* Number of ignored errors	*/
	int freeze_count;		/* Number of times frozen	*/
	int false_positives;		/* Number of false-positive reads */
	u32 config_space[16];		/* Saved PCI config space	*/
	struct pci_controller *phb;	/* Associated PHB		*/
	struct device_node *dn;		/* Associated device node	*/
	struct pci_dev *pdev;		/* Associated PCI device	*/
};

static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
{
	return edev->dn;
}

static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
{
	return edev->pdev;
}

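/*
 * Illustrative sketch (not part of this header): given the eeh_dev
 * that the EEH core associates with a device, the accessors above
 * recover the firmware node and the PCI device it shadows.  "edev"
 * here is a hypothetical pointer handed over by the EEH core:
 *
 *	struct device_node *dn = eeh_dev_to_of_node(edev);
 *	struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
 */
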
/*
 * This struct is used to track the registered EEH operation
 * callbacks. These callbacks are heavily platform dependent,
 * so each platform must register its own set of operations
 * before any further EEH operations take place.
 */
#define EEH_OPT_DISABLE		0	/* EEH disable	*/
#define EEH_OPT_ENABLE		1	/* EEH enable	*/
#define EEH_OPT_THAW_MMIO	2	/* MMIO enable	*/
#define EEH_OPT_THAW_DMA	3	/* DMA enable	*/
#define EEH_STATE_UNAVAILABLE	(1 << 0)	/* State unavailable	*/
#define EEH_STATE_NOT_SUPPORT	(1 << 1)	/* EEH not supported	*/
#define EEH_STATE_RESET_ACTIVE	(1 << 2)	/* Active reset		*/
#define EEH_STATE_MMIO_ACTIVE	(1 << 3)	/* Active MMIO		*/
#define EEH_STATE_DMA_ACTIVE	(1 << 4)	/* Active DMA		*/
#define EEH_STATE_MMIO_ENABLED	(1 << 5)	/* MMIO enabled		*/
#define EEH_STATE_DMA_ENABLED	(1 << 6)	/* DMA enabled		*/
#define EEH_RESET_DEACTIVATE	0	/* Deactivate the PE reset	*/
#define EEH_RESET_HOT		1	/* Hot reset			*/
#define EEH_RESET_FUNDAMENTAL	3	/* Fundamental reset		*/
#define EEH_LOG_TEMP		1	/* EEH temporary error log	*/
#define EEH_LOG_PERM		2	/* EEH permanent error log	*/

struct eeh_ops {
	char *name;
	int (*init)(void);
	int (*set_option)(struct device_node *dn, int option);
	int (*get_pe_addr)(struct device_node *dn);
	int (*get_state)(struct device_node *dn, int *state);
	int (*reset)(struct device_node *dn, int option);
	int (*wait_state)(struct device_node *dn, int max_wait);
	int (*get_log)(struct device_node *dn, int severity, char *drv_log, unsigned long len);
	int (*configure_bridge)(struct device_node *dn);
	int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
	int (*write_config)(struct device_node *dn, int where, int size, u32 val);
};

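/*
 * Illustrative sketch (not part of this header; all names below are
 * hypothetical): a platform is expected to fill in its own callback
 * table and register it via eeh_ops_register() before any further
 * EEH operations, roughly along these lines:
 *
 *	static int example_eeh_init(void)
 *	{
 *		return 0;
 *	}
 *
 *	static struct eeh_ops example_eeh_ops = {
 *		.name = "example",
 *		.init = example_eeh_init,
 *	};
 *
 *	static int __init example_eeh_setup(void)
 *	{
 *		return eeh_ops_register(&example_eeh_ops);
 *	}
 */
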
extern struct eeh_ops *eeh_ops;
extern int eeh_subsystem_enabled;

/*
 * Max number of EEH freezes allowed before we consider the device
 * to be permanently disabled.
 */
#define EEH_MAX_ALLOWED_FREEZES 5

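/*
 * Sketch of how the limit above is meant to be applied (illustrative
 * only; the real logic lives in the EEH recovery code, and "edev" and
 * give_up_on_device() are hypothetical):
 *
 *	edev->freeze_count++;
 *	if (edev->freeze_count > EEH_MAX_ALLOWED_FREEZES)
 *		give_up_on_device(edev);
 *
 * where give_up_on_device() stands for whatever the platform does to
 * mark the device permanently disabled.
 */
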
void * __devinit eeh_dev_init(struct device_node *dn, void *data);
void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb);
void __init eeh_dev_phb_init(void);
void __init eeh_init(void);
#ifdef CONFIG_PPC_PSERIES
int __init eeh_pseries_init(void);
#endif
int __init eeh_ops_register(struct eeh_ops *ops);
int __exit eeh_ops_unregister(const char *name);
unsigned long eeh_check_failure(const volatile void __iomem *token,
				unsigned long val);
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
void __init pci_addr_cache_build(void);
void eeh_add_device_tree_early(struct device_node *);
void eeh_add_device_tree_late(struct pci_bus *);
void eeh_remove_bus_device(struct pci_dev *);

/**
 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
 *
 * If this macro yields TRUE, the caller relays to eeh_check_failure()
 * which does further tests out of line.
 */
#define EEH_POSSIBLE_ERROR(val, type)	((val) == (type)~0 && eeh_subsystem_enabled)

/*
 * Reads from a device which has been isolated by EEH will return
 * all 1s.  This macro gives an all-1s value of the given size (in
 * bytes: 1, 2, or 4) for comparing with the result of a read.
 */
#define EEH_IO_ERROR_VALUE(size)	(~0U >> ((4 - (size)) * 8))

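/*
 * Illustrative sketch of how a driver might react to an all-1s read
 * ("regs", "REG_STATUS", "dn" and "pdev" are hypothetical, not defined
 * by this header):
 *
 *	u32 val = readl(regs + REG_STATUS);
 *
 *	if (val == EEH_IO_ERROR_VALUE(4))
 *		eeh_dn_check_failure(dn, pdev);
 */
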
#else /* !CONFIG_EEH */

static inline void *eeh_dev_init(struct device_node *dn, void *data)
{
	return NULL;
}

static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }

static inline void eeh_dev_phb_init(void) { }

static inline void eeh_init(void) { }

#ifdef CONFIG_PPC_PSERIES
static inline int eeh_pseries_init(void)
{
	return 0;
}
#endif /* CONFIG_PPC_PSERIES */

static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
	return val;
}

static inline int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
	return 0;
}

static inline void pci_addr_cache_build(void) { }

static inline void eeh_add_device_tree_early(struct device_node *dn) { }

static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }

static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
#define EEH_POSSIBLE_ERROR(val, type) (0)
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */

#ifdef CONFIG_PPC64
/*
 * MMIO read/write operations with EEH support.
 */
static inline u8 eeh_readb(const volatile void __iomem *addr)
{
	u8 val = in_8(addr);
	if (EEH_POSSIBLE_ERROR(val, u8))
		return eeh_check_failure(addr, val);
	return val;
}

static inline u16 eeh_readw(const volatile void __iomem *addr)
{
	u16 val = in_le16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}

static inline u32 eeh_readl(const volatile void __iomem *addr)
{
	u32 val = in_le32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}

static inline u64 eeh_readq(const volatile void __iomem *addr)
{
	u64 val = in_le64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}

static inline u16 eeh_readw_be(const volatile void __iomem *addr)
{
	u16 val = in_be16(addr);
	if (EEH_POSSIBLE_ERROR(val, u16))
		return eeh_check_failure(addr, val);
	return val;
}

static inline u32 eeh_readl_be(const volatile void __iomem *addr)
{
	u32 val = in_be32(addr);
	if (EEH_POSSIBLE_ERROR(val, u32))
		return eeh_check_failure(addr, val);
	return val;
}

static inline u64 eeh_readq_be(const volatile void __iomem *addr)
{
	u64 val = in_be64(addr);
	if (EEH_POSSIBLE_ERROR(val, u64))
		return eeh_check_failure(addr, val);
	return val;
}

static inline void eeh_memcpy_fromio(void *dest, const
				     volatile void __iomem *src,
				     unsigned long n)
{
	_memcpy_fromio(dest, src, n);

	/* Look for all-1s in the last word copied, i.e. the four bytes
	 * ending at dest[n].  Assume that at least 4 bytes were copied,
	 * and check all four of them.
	 */
	if (n >= 4 && EEH_POSSIBLE_ERROR(*((u32 *)(dest + n - 4)), u32))
		eeh_check_failure(src, *((u32 *)(dest + n - 4)));
}

/* In-string (string I/O) EEH read helpers */
static inline void eeh_readsb(const volatile void __iomem *addr, void * buf,
			      int ns)
{
	_insb(addr, buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u8*)buf)+ns-1)), u8))
		eeh_check_failure(addr, *(u8*)buf);
}

static inline void eeh_readsw(const volatile void __iomem *addr, void * buf,
			      int ns)
{
	_insw(addr, buf, ns);
	if (EEH_POSSIBLE_ERROR((*(((u16*)buf)+ns-1)), u16))
		eeh_check_failure(addr, *(u16*)buf);
}

static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
			      int nl)
{
	_insl(addr, buf, nl);
	if (EEH_POSSIBLE_ERROR((*(((u32*)buf)+nl-1)), u32))
		eeh_check_failure(addr, *(u32*)buf);
}

#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* _POWERPC_EEH_H */