xref: /openbmc/linux/arch/powerpc/kernel/eeh_pe.c (revision b755c25f)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * This file implements PE (Partitionable Endpoint) support based on
4  * information from the platform. Basically, there are 3 types of PEs:
5  * PHB/Bus/Device. All PEs are organized as a hierarchy tree, whose
6  * first level is associated with the existing PHBs since a particular
7  * PE is only meaningful within one PHB domain.
8  *
9  * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
10  */
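
/*
 * For illustration only (not part of the original header): a typical PHB
 * domain ends up looking roughly like this after probing:
 *
 *	PHB PE (EEH_PE_PHB)
 *	 +-- Bus PE (EEH_PE_BUS)           <- a bridge plus its subordinates
 *	 |    +-- Device PE (EEH_PE_DEVICE)
 *	 +-- Device PE (EEH_PE_DEVICE)
 */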
11 
12 #include <linux/delay.h>
13 #include <linux/export.h>
14 #include <linux/gfp.h>
15 #include <linux/kernel.h>
16 #include <linux/of.h>
17 #include <linux/pci.h>
18 #include <linux/string.h>
19 
20 #include <asm/pci-bridge.h>
21 #include <asm/ppc-pci.h>
22 
23 static int eeh_pe_aux_size = 0;
24 static LIST_HEAD(eeh_phb_pe);
25 
26 /**
27  * eeh_set_pe_aux_size - Set PE auxiliary data size
28  * @size: PE auxiliary data size
29  *
30  * Set PE auxiliary data size.
31  */
32 void eeh_set_pe_aux_size(int size)
33 {
34 	if (size < 0)
35 		return;
36 
37 	eeh_pe_aux_size = size;
38 }
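
/*
 * Illustrative usage (a sketch, not code from this file): a platform's
 * EEH initialisation would reserve its per-PE scratch space before any
 * PEs are allocated, e.g.
 *
 *	eeh_set_pe_aux_size(PLATFORM_PE_DIAG_SIZE);
 *
 * where PLATFORM_PE_DIAG_SIZE is a hypothetical platform-defined
 * constant. eeh_pe_alloc() below then appends that many bytes,
 * cache-line aligned, behind each struct eeh_pe and exposes them
 * through pe->data.
 */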
39 
40 /**
41  * eeh_pe_alloc - Allocate PE
42  * @phb: PCI controller
43  * @type: PE type
44  *
45  * Allocate PE instance dynamically.
46  */
47 static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
48 {
49 	struct eeh_pe *pe;
50 	size_t alloc_size;
51 
52 	alloc_size = sizeof(struct eeh_pe);
53 	if (eeh_pe_aux_size) {
54 		alloc_size = ALIGN(alloc_size, cache_line_size());
55 		alloc_size += eeh_pe_aux_size;
56 	}
57 
58 	/* Allocate PHB PE */
59 	pe = kzalloc(alloc_size, GFP_KERNEL);
60 	if (!pe) return NULL;
61 
62 	/* Initialize PHB PE */
63 	pe->type = type;
64 	pe->phb = phb;
65 	INIT_LIST_HEAD(&pe->child_list);
66 	INIT_LIST_HEAD(&pe->edevs);
67 
68 	pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
69 				      cache_line_size());
70 	return pe;
71 }
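
/*
 * Resulting allocation layout when auxiliary data has been requested
 * (sketch):
 *
 *	+-----------------------+ <-- pe
 *	| struct eeh_pe         |
 *	+-----------------------+ <-- ALIGN(sizeof(*pe), cache_line_size())
 *	| eeh_pe_aux_size bytes | <-- pe->data
 *	+-----------------------+
 */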
72 
73 /**
74  * eeh_phb_pe_create - Create PHB PE
75  * @phb: PCI controller
76  *
77  * The function should be called when a PHB is detected during
78  * system boot or PCI hotplug, in order to create the PHB PE.
79  */
80 int eeh_phb_pe_create(struct pci_controller *phb)
81 {
82 	struct eeh_pe *pe;
83 
84 	/* Allocate PHB PE */
85 	pe = eeh_pe_alloc(phb, EEH_PE_PHB);
86 	if (!pe) {
87 		pr_err("%s: out of memory!\n", __func__);
88 		return -ENOMEM;
89 	}
90 
91 	/* Put it into the list */
92 	list_add_tail(&pe->child, &eeh_phb_pe);
93 
94 	pr_debug("EEH: Add PE for PHB#%x\n", phb->global_number);
95 
96 	return 0;
97 }
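
/*
 * A minimal sketch of the expected caller (assumed, not shown in this
 * file): the common EEH initialisation walks the known PHBs and creates
 * one PHB PE for each of them, roughly:
 *
 *	list_for_each_entry(hose, &hose_list, list_node)
 *		eeh_phb_pe_create(hose);
 */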
98 
99 /**
100  * eeh_wait_state - Wait for PE state
101  * @pe: EEH PE
102  * @max_wait: maximum wait time in milliseconds
103  *
104  * Wait for the state of the associated PE. It might take some time
105  * to retrieve the PE's state.
106  */
107 int eeh_wait_state(struct eeh_pe *pe, int max_wait)
108 {
109 	int ret;
110 	int mwait;
111 
112 	/*
113 	 * According to PAPR, the state of a PE might be temporarily
114 	 * unavailable. In that case, we have to wait for the interval
115 	 * indicated by firmware before retrying. The maximum wait time
116 	 * is 5 minutes, which was taken from the original EEH
117 	 * implementation; the original implementation also defined
118 	 * the minimum wait time as 1 second.
119 	 */
120 #define EEH_STATE_MIN_WAIT_TIME	(1000)
121 #define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)
122 
123 	while (1) {
124 		ret = eeh_ops->get_state(pe, &mwait);
125 
126 		if (ret != EEH_STATE_UNAVAILABLE)
127 			return ret;
128 
129 		if (max_wait <= 0) {
130 			pr_warn("%s: Timeout when getting PE's state (%d)\n",
131 				__func__, max_wait);
132 			return EEH_STATE_NOT_SUPPORT;
133 		}
134 
135 		if (mwait < EEH_STATE_MIN_WAIT_TIME) {
136 			pr_warn("%s: Firmware returned bad wait value %d\n",
137 				__func__, mwait);
138 			mwait = EEH_STATE_MIN_WAIT_TIME;
139 		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
140 			pr_warn("%s: Firmware returned too long wait value %d\n",
141 				__func__, mwait);
142 			mwait = EEH_STATE_MAX_WAIT_TIME;
143 		}
144 
145 		msleep(min(mwait, max_wait));
146 		max_wait -= mwait;
147 	}
148 }
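
/*
 * Typical use (a sketch, not code from this file; EEH_STATE_MMIO_ACTIVE
 * is one of the state bits from asm/eeh.h): a caller polls until the PE
 * leaves the unavailable state and then inspects the returned bits, e.g.
 *
 *	ret = eeh_wait_state(pe, 300 * 1000);	// wait up to 5 minutes
 *	if (!(ret & EEH_STATE_MMIO_ACTIVE))
 *		... treat the PE as unrecoverable ...
 */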
149 
150 /**
151  * eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
152  * @phb: PCI controller
153  *
154  * All PEs form a hierarchy tree whose first layer is composed of
155  * PHB PEs. The function retrieves the PHB PE corresponding to the
156  * given PHB.
157  */
158 struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb)
159 {
160 	struct eeh_pe *pe;
161 
162 	list_for_each_entry(pe, &eeh_phb_pe, child) {
163 		/*
164 		 * Actually, we needn't check the type since
165 		 * the PE type for the PHB was determined when
166 		 * it was created.
167 		 */
168 		if ((pe->type & EEH_PE_PHB) && pe->phb == phb)
169 			return pe;
170 	}
171 
172 	return NULL;
173 }
174 
175 /**
176  * eeh_pe_next - Retrieve the next PE in the tree
177  * @pe: current PE
178  * @root: root PE
179  *
180  * The function is used to retrieve the next PE in the
181  * hierarchy PE tree.
182  */
183 struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root)
184 {
185 	struct list_head *next = pe->child_list.next;
186 
187 	if (next == &pe->child_list) {
188 		while (1) {
189 			if (pe == root)
190 				return NULL;
191 			next = pe->child.next;
192 			if (next != &pe->parent->child_list)
193 				break;
194 			pe = pe->parent;
195 		}
196 	}
197 
198 	return list_entry(next, struct eeh_pe, child);
199 }
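
/*
 * The eeh_for_each_pe() helper used throughout this file is built on
 * this pre-order iterator. Roughly (the real definition lives in
 * asm/eeh.h):
 *
 *	#define eeh_for_each_pe(root, pe) \
 *		for (pe = root; pe; pe = eeh_pe_next(pe, root))
 */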
200 
201 /**
202  * eeh_pe_traverse - Traverse PEs in the specified PHB
203  * @root: root PE
204  * @fn: callback
205  * @flag: extra parameter to callback
206  *
207  * The function is used to traverse the specified PE and its
208  * child PEs. The traversing is to be terminated once the
209  * callback returns something other than NULL, or no more PEs
210  * to be traversed.
211  */
212 void *eeh_pe_traverse(struct eeh_pe *root,
213 		      eeh_pe_traverse_func fn, void *flag)
214 {
215 	struct eeh_pe *pe;
216 	void *ret;
217 
218 	eeh_for_each_pe(root, pe) {
219 		ret = fn(pe, flag);
220 		if (ret) return ret;
221 	}
222 
223 	return NULL;
224 }
225 
226 /**
227  * eeh_pe_dev_traverse - Traverse the devices from the PE
228  * @root: EEH PE
229  * @fn: function callback
230  * @flag: extra parameter to callback
231  *
232  * The function is used to traverse the devices of the specified
233  * PE and its child PEs.
234  */
235 void eeh_pe_dev_traverse(struct eeh_pe *root,
236 			  eeh_edev_traverse_func fn, void *flag)
237 {
238 	struct eeh_pe *pe;
239 	struct eeh_dev *edev, *tmp;
240 
241 	if (!root) {
242 		pr_warn("%s: Invalid PE %p\n",
243 			__func__, root);
244 		return;
245 	}
246 
247 	/* Traverse root PE */
248 	eeh_for_each_pe(root, pe)
249 		eeh_pe_for_each_dev(pe, edev, tmp)
250 			fn(edev, flag);
251 }
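
/*
 * Illustrative callback (hypothetical, not part of this file): counting
 * every EEH device in a PE subtree with eeh_pe_dev_traverse():
 *
 *	static void count_edev(struct eeh_dev *edev, void *flag)
 *	{
 *		(*(int *)flag)++;
 *	}
 *
 *	int count = 0;
 *	eeh_pe_dev_traverse(pe, count_edev, &count);
 */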
252 
253 /**
254  * __eeh_pe_get - Check the PE address
255  * @pe: EEH PE
256  * @flag: pointer to the PE address (number) to match
257  *
258  * Check whether the given PE matches the PE address passed in through
259  * @flag. Used as an eeh_pe_traverse() callback by eeh_pe_get() below.
260  */
261 static void *__eeh_pe_get(struct eeh_pe *pe, void *flag)
262 {
263 	int *target_pe = flag;
264 
265 	/* PHB PEs are special and should be ignored */
266 	if (pe->type & EEH_PE_PHB)
267 		return NULL;
268 
269 	if (*target_pe == pe->addr)
270 		return pe;
271 
272 	return NULL;
273 }
274 
275 /**
276  * eeh_pe_get - Search PE based on the given address
277  * @phb: PCI controller
278  * @pe_no: PE number
279  *
280  * Search for the corresponding PE based on the specified address,
281  * which is included in the EEH device. The function is used to check
282  * whether a PE has already been created for the given PE address.
283  * Note that the PE address can have 2 formats: a traditional PE
284  * address composed of the PCI bus/device/function number, or a
285  * unified PE address.
286  */
287 struct eeh_pe *eeh_pe_get(struct pci_controller *phb, int pe_no)
288 {
289 	struct eeh_pe *root = eeh_phb_pe_get(phb);
290 
291 	return eeh_pe_traverse(root, __eeh_pe_get, &pe_no);
292 }
293 
294 /**
295  * eeh_pe_tree_insert - Add EEH device to parent PE
296  * @edev: EEH device
297  * @new_pe_parent: PE to create additional PEs under
298  *
299  * Add EEH device to the PE in edev->pe_config_addr. If a PE already
300  * exists with that address then @edev is added to that PE. Otherwise
301  * a new PE is created and inserted into the PE tree as a child of
302  * @new_pe_parent.
303  *
304  * If @new_pe_parent is NULL then the new PE will be inserted
305  * directly under the PHB.
306  */
307 int eeh_pe_tree_insert(struct eeh_dev *edev, struct eeh_pe *new_pe_parent)
308 {
309 	struct pci_controller *hose = edev->controller;
310 	struct eeh_pe *pe, *parent;
311 
312 	/*
313 	 * Search for an existing PE according to the PE
314 	 * address. If one already exists, the PE should be
315 	 * composed of the PCI bus and its subordinate
316 	 * components.
317 	 */
318 	pe = eeh_pe_get(hose, edev->pe_config_addr);
319 	if (pe) {
320 		if (pe->type & EEH_PE_INVALID) {
321 			list_add_tail(&edev->entry, &pe->edevs);
322 			edev->pe = pe;
323 			/*
324 			 * We get here because of a PCI hotplug triggered by
325 			 * EEH recovery. Clear EEH_PE_INVALID all the way up the tree.
326 			 */
327 			parent = pe;
328 			while (parent) {
329 				if (!(parent->type & EEH_PE_INVALID))
330 					break;
331 				parent->type &= ~EEH_PE_INVALID;
332 				parent = parent->parent;
333 			}
334 
335 			eeh_edev_dbg(edev, "Added to existing PE (parent: PE#%x)\n",
336 				     pe->parent->addr);
337 		} else {
338 			/* Mark the PE as a bus-type PE */
339 			pe->type = EEH_PE_BUS;
340 			edev->pe = pe;
341 
342 			/* Add the edev to the PE */
343 			list_add_tail(&edev->entry, &pe->edevs);
344 			eeh_edev_dbg(edev, "Added to bus PE\n");
345 		}
346 		return 0;
347 	}
348 
349 	/* Create a new EEH PE */
350 	if (edev->physfn)
351 		pe = eeh_pe_alloc(hose, EEH_PE_VF);
352 	else
353 		pe = eeh_pe_alloc(hose, EEH_PE_DEVICE);
354 	if (!pe) {
355 		pr_err("%s: out of memory!\n", __func__);
356 		return -ENOMEM;
357 	}
358 
359 	pe->addr = edev->pe_config_addr;
360 
361 	/*
362 	 * Put the new EEH PE into the hierarchy tree. If no parent
363 	 * is given, the newly created PE will be attached to the
364 	 * PHB directly. Otherwise, we associate the PE with the
365 	 * given parent.
366 	 */
367 	if (!new_pe_parent) {
368 		new_pe_parent = eeh_phb_pe_get(hose);
369 		if (!new_pe_parent) {
370 			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
371 				__func__, hose->global_number);
372 			edev->pe = NULL;
373 			kfree(pe);
374 			return -EEXIST;
375 		}
376 	}
377 
378 	/* link new PE into the tree */
379 	pe->parent = new_pe_parent;
380 	list_add_tail(&pe->child, &new_pe_parent->child_list);
381 
382 	/*
383 	 * Add the EEH device to the newly created PE and link
384 	 * the device back to its PE.
385 	 */
386 	list_add_tail(&edev->entry, &pe->edevs);
387 	edev->pe = pe;
388 	eeh_edev_dbg(edev, "Added to new PE (parent: PE#%x)\n",
389 		     new_pe_parent->addr);
390 
391 	return 0;
392 }
393 
394 /**
395  * eeh_pe_tree_remove - Remove one EEH device from the associated PE
396  * @edev: EEH device
397  *
398  * The PE hierarchy tree might be changed when doing PCI hotplug.
399  * Also, the PCI devices or buses could be removed from the system
400  * during EEH recovery. So this function has to be called to remove
401  * the corresponding PE when necessary.
402  */
403 int eeh_pe_tree_remove(struct eeh_dev *edev)
404 {
405 	struct eeh_pe *pe, *parent, *child;
406 	bool keep, recover;
407 	int cnt;
408 
409 	pe = eeh_dev_to_pe(edev);
410 	if (!pe) {
411 		eeh_edev_dbg(edev, "No PE found for device.\n");
412 		return -EEXIST;
413 	}
414 
415 	/* Remove the EEH device */
416 	edev->pe = NULL;
417 	list_del(&edev->entry);
418 
419 	/*
420 	 * Check whether the PE still includes any EEH devices.
421 	 * If not, we should delete it. The same applies to each
422 	 * parent PE up the tree that is left with no child PEs
423 	 * and no EEH devices.
424 	 */
425 	while (1) {
426 		parent = pe->parent;
427 
428 		/* PHB PEs should never be removed */
429 		if (pe->type & EEH_PE_PHB)
430 			break;
431 
432 		/*
433 		 * XXX: KEEP is set while resetting a PE. I don't think it's
434 		 * ever set without RECOVERING also being set. I could
435 		 * be wrong though so catch that with a WARN.
436 		 */
437 		keep = !!(pe->state & EEH_PE_KEEP);
438 		recover = !!(pe->state & EEH_PE_RECOVERING);
439 		WARN_ON(keep && !recover);
440 
441 		if (!keep && !recover) {
442 			if (list_empty(&pe->edevs) &&
443 			    list_empty(&pe->child_list)) {
444 				list_del(&pe->child);
445 				kfree(pe);
446 			} else {
447 				break;
448 			}
449 		} else {
450 			/*
451 			 * Mark the PE as invalid. At the end of the recovery
452 			 * process any invalid PEs will be garbage collected.
453 			 *
454 			 * We need to delay the free()ing of them since we can
455 		 * remove edevs while traversing the PE tree, which
456 			 * might trigger the removal of a PE and we can't
457 			 * deal with that (yet).
458 			 */
459 			if (list_empty(&pe->edevs)) {
460 				cnt = 0;
461 				list_for_each_entry(child, &pe->child_list, child) {
462 					if (!(child->type & EEH_PE_INVALID)) {
463 						cnt++;
464 						break;
465 					}
466 				}
467 
468 				if (!cnt)
469 					pe->type |= EEH_PE_INVALID;
470 				else
471 					break;
472 			}
473 		}
474 
475 		pe = parent;
476 	}
477 
478 	return 0;
479 }
480 
481 /**
482  * eeh_pe_update_time_stamp - Update PE's frozen time stamp
483  * @pe: EEH PE
484  *
485  * We keep a time stamp for each PE so that we can count how often
486  * it has been frozen within the last hour. The function should be
487  * called to update the time stamp on the first error of a specific
488  * PE; if more than an hour has passed since then, the freeze count
489  * is reset.
489  */
490 void eeh_pe_update_time_stamp(struct eeh_pe *pe)
491 {
492 	time64_t tstamp;
493 
494 	if (!pe) return;
495 
496 	if (pe->freeze_count <= 0) {
497 		pe->freeze_count = 0;
498 		pe->tstamp = ktime_get_seconds();
499 	} else {
500 		tstamp = ktime_get_seconds();
501 		if (tstamp - pe->tstamp > 3600) {
502 			pe->tstamp = tstamp;
503 			pe->freeze_count = 0;
504 		}
505 	}
506 }
507 
508 /**
509  * eeh_pe_state_mark - Mark specified state for PE and its child PEs
510  * @root: EEH PE
 * @state: state bits to set
511  *
512  * An EEH error affects the current PE and its child PEs. The
513  * function is used to mark the given state bits on all of the
514  * affected PEs in the subtree.
515  */
516 void eeh_pe_state_mark(struct eeh_pe *root, int state)
517 {
518 	struct eeh_pe *pe;
519 
520 	eeh_for_each_pe(root, pe)
521 		if (!(pe->state & EEH_PE_REMOVED))
522 			pe->state |= state;
523 }
524 EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
525 
526 /**
527  * eeh_pe_mark_isolated - Mark a PE and its children as isolated
528  * @root: EEH PE
529  *
530  * Record that a PE has been isolated by marking the PE and its children as
531  * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
532  * as pci_channel_io_frozen.
533  */
534 void eeh_pe_mark_isolated(struct eeh_pe *root)
535 {
536 	struct eeh_pe *pe;
537 	struct eeh_dev *edev;
538 	struct pci_dev *pdev;
539 
540 	eeh_pe_state_mark(root, EEH_PE_ISOLATED);
541 	eeh_for_each_pe(root, pe) {
542 		list_for_each_entry(edev, &pe->edevs, entry) {
543 			pdev = eeh_dev_to_pci_dev(edev);
544 			if (pdev)
545 				pdev->error_state = pci_channel_io_frozen;
546 		}
547 		/* Block PCI config access if required */
548 		if (pe->state & EEH_PE_CFG_RESTRICTED)
549 			pe->state |= EEH_PE_CFG_BLOCKED;
550 	}
551 }
552 EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);
553 
554 static void __eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
555 {
556 	int mode = *((int *)flag);
557 
558 	edev->mode |= mode;
559 }
560 
561 /**
562  * eeh_pe_dev_mode_mark - Mark mode for all devices under the PE
563  * @pe: EEH PE
 * @mode: mode bits to set on each device
564  *
565  * Mark the specified mode bits on all devices of the PE and its child PEs.
566  */
567 void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode)
568 {
569 	eeh_pe_dev_traverse(pe, __eeh_pe_dev_mode_mark, &mode);
570 }
571 
572 /**
573  * eeh_pe_state_clear - Clear state for the PE
574  * @root: EEH PE
575  * @state: state
576  * @include_passed: include passed-through devices?
577  *
578  * The function is used to clear the indicated state from the
579  * given PE. Besides, we also clear the check count of the PE
580  * as well.
581  */
582 void eeh_pe_state_clear(struct eeh_pe *root, int state, bool include_passed)
583 {
584 	struct eeh_pe *pe;
585 	struct eeh_dev *edev, *tmp;
586 	struct pci_dev *pdev;
587 
588 	eeh_for_each_pe(root, pe) {
589 		/* Keep the state of permanently removed PE intact */
590 		if (pe->state & EEH_PE_REMOVED)
591 			continue;
592 
593 		if (!include_passed && eeh_pe_passed(pe))
594 			continue;
595 
596 		pe->state &= ~state;
597 
598 		/*
599 		 * Special treatment when clearing the isolated state:
600 		 * clear the check count since the last isolation and put
601 		 * all affected devices back into the normal state.
602 		 */
603 		if (!(state & EEH_PE_ISOLATED))
604 			continue;
605 
606 		pe->check_count = 0;
607 		eeh_pe_for_each_dev(pe, edev, tmp) {
608 			pdev = eeh_dev_to_pci_dev(edev);
609 			if (!pdev)
610 				continue;
611 
612 			pdev->error_state = pci_channel_io_normal;
613 		}
614 
615 		/* Unblock PCI config access if required */
616 		if (pe->state & EEH_PE_CFG_RESTRICTED)
617 			pe->state &= ~EEH_PE_CFG_BLOCKED;
618 	}
619 }
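
/*
 * A sketch of typical use (assumed caller, not in this file): once a PE
 * has been recovered, the recovery core would drop the isolation flag on
 * the whole subtree, e.g.
 *
 *	eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
 *
 * which also resets the check counts and puts the affected PCI devices
 * back to pci_channel_io_normal, as implemented above.
 */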
620 
621 /*
622  * Some PCI bridges (e.g. PLX bridges) have primary/secondary
623  * buses assigned explicitly by firmware, and we probably have
624  * lost that after reset. So we have to delay the check until
625  * the PCI-CFG registers have been restored for the parent
626  * bridge.
627  *
628  * Don't use the normal PCI-CFG accessors, which have probably been
629  * blocked on the normal path during this stage. Instead, use the
630  * EEH operations, which are always permitted.
631  */
632 static void eeh_bridge_check_link(struct eeh_dev *edev)
633 {
634 	int cap;
635 	uint32_t val;
636 	int timeout = 0;
637 
638 	/*
639 	 * We only check root ports and downstream ports of
640 	 * PCIe switches
641 	 */
642 	if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
643 		return;
644 
645 	eeh_edev_dbg(edev, "Checking PCIe link...\n");
646 
647 	/* Check slot status */
648 	cap = edev->pcie_cap;
649 	eeh_ops->read_config(edev, cap + PCI_EXP_SLTSTA, 2, &val);
650 	if (!(val & PCI_EXP_SLTSTA_PDS)) {
651 		eeh_edev_dbg(edev, "No card in the slot (0x%04x) !\n", val);
652 		return;
653 	}
654 
655 	/* Check power status if we have the capability */
656 	eeh_ops->read_config(edev, cap + PCI_EXP_SLTCAP, 2, &val);
657 	if (val & PCI_EXP_SLTCAP_PCP) {
658 		eeh_ops->read_config(edev, cap + PCI_EXP_SLTCTL, 2, &val);
659 		if (val & PCI_EXP_SLTCTL_PCC) {
660 			eeh_edev_dbg(edev, "In power-off state, power it on ...\n");
661 			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
662 			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
663 			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);	/* power indicator on */
664 			msleep(2 * 1000);
665 		}
666 	}
667 
668 	/* Enable link */
669 	eeh_ops->read_config(edev, cap + PCI_EXP_LNKCTL, 2, &val);
670 	val &= ~PCI_EXP_LNKCTL_LD;
671 	eeh_ops->write_config(edev, cap + PCI_EXP_LNKCTL, 2, val);
672 
673 	/* Check link */
674 	if (!edev->pdev->link_active_reporting) {
675 		eeh_edev_dbg(edev, "No link reporting capability\n");
676 		msleep(1000);
677 		return;
678 	}
679 
680 	/* Wait for the link to come up, until timeout (5s) */
681 	timeout = 0;
682 	while (timeout < 5000) {
683 		msleep(20);
684 		timeout += 20;
685 
686 		eeh_ops->read_config(edev, cap + PCI_EXP_LNKSTA, 2, &val);
687 		if (val & PCI_EXP_LNKSTA_DLLLA)
688 			break;
689 	}
690 
691 	if (val & PCI_EXP_LNKSTA_DLLLA)
692 		eeh_edev_dbg(edev, "Link up (%s)\n",
693 			 (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
694 	else
695 		eeh_edev_dbg(edev, "Link not ready (0x%04x)\n", val);
696 }
697 
698 #define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
699 #define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
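
/*
 * Worked example for the byte-access macros above: the saved config
 * space is held as 32-bit words, and BYTE_SWAP() maps a PCI config byte
 * offset onto the corresponding index of a big-endian byte view of
 * those words. For PCI_CACHE_LINE_SIZE (offset 0x0c):
 * BYTE_SWAP(0x0c) = 8*(12/4) + 3 - 12 = 15, i.e. byte 3 of
 * config_space[3], which is where config offset 0x0c lands when the
 * little-endian config dword is stored big-endian.
 */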
700 
701 static void eeh_restore_bridge_bars(struct eeh_dev *edev)
702 {
703 	int i;
704 
705 	/*
706 	 * Device BARs: 0x10 - 0x18
707 	 * Bus numbers and windows: 0x18 - 0x30
708 	 */
709 	for (i = 4; i < 13; i++)
710 		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
711 	/* Rom: 0x38 */
712 	eeh_ops->write_config(edev, 14*4, 4, edev->config_space[14]);
713 
714 	/* Cache line & Latency timer: 0xC 0xD */
715 	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
716 		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
717 	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
718 		SAVED_BYTE(PCI_LATENCY_TIMER));
719 	/* Max latency, min grant, interrupt ping and line: 0x3C */
720 	/* Max latency, min grant, interrupt pin and line: 0x3C */
721 
722 	/* PCI Command: 0x4 */
723 	eeh_ops->write_config(edev, PCI_COMMAND, 4, edev->config_space[1] |
724 			      PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
725 
726 	/* Check that the PCIe link is ready */
727 	eeh_bridge_check_link(edev);
728 }
729 
730 static void eeh_restore_device_bars(struct eeh_dev *edev)
731 {
732 	int i;
733 	u32 cmd;
734 
735 	for (i = 4; i < 10; i++)
736 		eeh_ops->write_config(edev, i*4, 4, edev->config_space[i]);
737 	/* 12 == Expansion ROM Address */
738 	eeh_ops->write_config(edev, 12*4, 4, edev->config_space[12]);
739 
740 	eeh_ops->write_config(edev, PCI_CACHE_LINE_SIZE, 1,
741 		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
742 	eeh_ops->write_config(edev, PCI_LATENCY_TIMER, 1,
743 		SAVED_BYTE(PCI_LATENCY_TIMER));
744 
745 	/* max latency, min grant, interrupt pin and line */
746 	eeh_ops->write_config(edev, 15*4, 4, edev->config_space[15]);
747 
748 	/*
749 	 * Restore PERR & SERR bits, some devices require it,
750 	 * don't touch the other command bits
751 	 */
752 	eeh_ops->read_config(edev, PCI_COMMAND, 4, &cmd);
753 	if (edev->config_space[1] & PCI_COMMAND_PARITY)
754 		cmd |= PCI_COMMAND_PARITY;
755 	else
756 		cmd &= ~PCI_COMMAND_PARITY;
757 	if (edev->config_space[1] & PCI_COMMAND_SERR)
758 		cmd |= PCI_COMMAND_SERR;
759 	else
760 		cmd &= ~PCI_COMMAND_SERR;
761 	eeh_ops->write_config(edev, PCI_COMMAND, 4, cmd);
762 }
763 
764 /**
765  * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
766  * @data: EEH device
767  * @flag: Unused
768  *
769  * Restores the PCI configuration space base address registers,
770  * the expansion ROM base address, the latency timer, etc.
771  * from the values saved in the EEH device.
772  */
773 static void eeh_restore_one_device_bars(struct eeh_dev *edev, void *flag)
774 {
775 	/* Do special restore for bridges */
776 	if (edev->mode & EEH_DEV_BRIDGE)
777 		eeh_restore_bridge_bars(edev);
778 	else
779 		eeh_restore_device_bars(edev);
780 
781 	if (eeh_ops->restore_config)
782 		eeh_ops->restore_config(edev);
783 }
784 
785 /**
786  * eeh_pe_restore_bars - Restore the PCI config space info
787  * @pe: EEH PE
788  *
789  * This routine performs a recursive walk over the children
790  * of this PE as well.
791  */
792 void eeh_pe_restore_bars(struct eeh_pe *pe)
793 {
794 	/*
795 	 * We needn't take the EEH lock since eeh_pe_dev_traverse()
796 	 * will take that.
797 	 */
798 	eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL);
799 }
800 
801 /**
802  * eeh_pe_loc_get - Retrieve location code binding to the given PE
803  * @pe: EEH PE
804  *
805  * Retrieve the location code of the given PE. If the primary PE bus
806  * is the root bus, the location code is taken from the PHB device
807  * tree node or the root port. Otherwise, the device tree node of
808  * the upstream bridge of the primary PE bus is checked for it.
809  */
810 const char *eeh_pe_loc_get(struct eeh_pe *pe)
811 {
812 	struct pci_bus *bus = eeh_pe_bus_get(pe);
813 	struct device_node *dn;
814 	const char *loc = NULL;
815 
816 	while (bus) {
817 		dn = pci_bus_to_OF_node(bus);
818 		if (!dn) {
819 			bus = bus->parent;
820 			continue;
821 		}
822 
823 		if (pci_is_root_bus(bus))
824 			loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
825 		else
826 			loc = of_get_property(dn, "ibm,slot-location-code",
827 					      NULL);
828 
829 		if (loc)
830 			return loc;
831 
832 		bus = bus->parent;
833 	}
834 
835 	return "N/A";
836 }
837 
838 /**
839  * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
840  * @pe: EEH PE
841  *
842  * Retrieve the PCI bus according to the given PE. Basically,
843  * there are 3 types of PEs: PHB/Bus/Device. For a PHB PE, the
844  * primary PCI bus will be retrieved. The parent bus will be
845  * returned for a bus PE. However, we don't have an associated
846  * PCI bus for a device PE.
847  */
848 struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
849 {
850 	struct eeh_dev *edev;
851 	struct pci_dev *pdev;
852 
853 	if (pe->type & EEH_PE_PHB)
854 		return pe->phb->bus;
855 
856 	/* The primary bus might be cached during probe time */
857 	if (pe->state & EEH_PE_PRI_BUS)
858 		return pe->bus;
859 
860 	/* Retrieve the parent PCI bus of first (top) PCI device */
861 	edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
862 	pdev = eeh_dev_to_pci_dev(edev);
863 	if (pdev)
864 		return pdev->bus;
865 
866 	return NULL;
867 }
868