xref: /openbmc/linux/arch/parisc/kernel/inventory.c (revision b830f94f)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inventory.c
 *
 * Copyright (c) 1999 The Puffin Group (David Kennedy and Alex deVries)
 * Copyright (c) 2001 Matthew Wilcox for Hewlett-Packard
 *
 * These are the routines to discover what hardware exists in this box.
 * This task is complicated by there being 3 different ways of
 * performing an inventory, depending largely on the age of the box.
 * The recommended way to do this is to check to see whether the machine
 * is a `Snake' first, then try System Map, then try PAT.  We try System
 * Map before checking for a Snake -- this probably doesn't cause any
 * problems, but...
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/hardware.h>
#include <asm/io.h>
#include <asm/mmzone.h>
#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/parisc-device.h>
#include <asm/tlbflush.h>

/*
** Debug options
** DEBUG_PAT Dump details which PDC PAT provides about ranges/devices.
*/
#undef DEBUG_PAT

int pdc_type __ro_after_init = PDC_TYPE_ILLEGAL;

/* cell number and location (PAT firmware only) */
unsigned long parisc_cell_num __ro_after_init;
unsigned long parisc_cell_loc __ro_after_init;
unsigned long parisc_pat_pdc_cap __ro_after_init;

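/*
 * setup_pdc() - Detect which flavour of PDC firmware this box implements.
 *
 * Probes System Map first, then (on 64-bit kernels) PDC PAT, and finally
 * falls back to a bus-ID check for Snake machines.  The result is recorded
 * in pdc_type and consumed later by do_memory_inventory() and
 * do_device_inventory().
 */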
void __init setup_pdc(void)
{
	long status;
	unsigned int bus_id;
	struct pdc_system_map_mod_info module_result;
	struct pdc_module_path module_path;
	struct pdc_model model;
#ifdef CONFIG_64BIT
	struct pdc_pat_cell_num cell_info;
#endif

	/* Determine the pdc "type" used on this machine */

	printk(KERN_INFO "Determining PDC firmware type: ");

	status = pdc_system_map_find_mods(&module_result, &module_path, 0);
	if (status == PDC_OK) {
		pdc_type = PDC_TYPE_SYSTEM_MAP;
		pr_cont("System Map.\n");
		return;
	}

	/*
	 * If the machine doesn't support PDC_SYSTEM_MAP then either it
	 * is a pdc pat box, or it is an older box. All 64 bit capable
	 * machines are either pdc pat boxes or they support PDC_SYSTEM_MAP.
	 */

	/*
	 * TODO: We should test for 64 bit capability and give a
	 * clearer message.
	 */

#ifdef CONFIG_64BIT
	status = pdc_pat_cell_get_number(&cell_info);
	if (status == PDC_OK) {
		unsigned long legacy_rev, pat_rev;
		pdc_type = PDC_TYPE_PAT;
		pr_cont("64 bit PAT.\n");
		parisc_cell_num = cell_info.cell_num;
		parisc_cell_loc = cell_info.cell_loc;
		pr_info("PAT: Running on cell %lu and location %lu.\n",
			parisc_cell_num, parisc_cell_loc);
		status = pdc_pat_pd_get_pdc_revisions(&legacy_rev,
			&pat_rev, &parisc_pat_pdc_cap);
		pr_info("PAT: legacy revision 0x%lx, pat_rev 0x%lx, pdc_cap 0x%lx, S-PTLB %d, HPMC_RENDEZ %d.\n",
			legacy_rev, pat_rev, parisc_pat_pdc_cap,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_SIMULTANEOUS_PTLB ? 1:0,
			parisc_pat_pdc_cap
			 & PDC_PAT_CAPABILITY_BIT_PDC_HPMC_RENDEZ   ? 1:0);
		return;
	}
#endif

	/* Check the CPU's bus ID.  There's probably a better test.  */

	status = pdc_model_info(&model);

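	/*
	 * The bus ID is taken from the five-bit field starting at bit 11
	 * of the hversion word returned by PDC_MODEL_INFO; the (4 + 7)
	 * shift and 0x1f mask below isolate that field.
	 */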
	bus_id = (model.hversion >> (4 + 7)) & 0x1f;

	switch (bus_id) {
	case 0x4:		/* 720, 730, 750, 735, 755 */
	case 0x6:		/* 705, 710 */
	case 0x7:		/* 715, 725 */
	case 0x8:		/* 745, 747, 742 */
	case 0xA:		/* 712 and similar */
	case 0xC:		/* 715/64, at least */

		pdc_type = PDC_TYPE_SNAKE;
		pr_cont("Snake.\n");
		return;

	default:		/* Everything else */

		pr_cont("Unsupported.\n");
		panic("If this is a 64-bit machine, please try a 64-bit kernel.\n");
	}
}

#define PDC_PAGE_ADJ_SHIFT (PAGE_SHIFT - 12) /* pdc pages are always 4k */
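/*
 * PDC reports memory in units of 4 kB pages regardless of the kernel's
 * PAGE_SIZE; PDC_PAGE_ADJ_SHIFT converts between the two.  For example,
 * with 16 kB kernel pages (PAGE_SHIFT == 14) the shift is 2, so a PDC
 * count of 0x1000 4 kB pages becomes 0x400 kernel pages.
 */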

static void __init
set_pmem_entry(physmem_range_t *pmem_ptr, unsigned long start,
	       unsigned long pages4k)
{
	/* Rather than aligning and potentially throwing away
	 * memory, we'll assume that any ranges are already
	 * nicely aligned with any reasonable page size, and
	 * panic if they are not (it's more likely that the
	 * pdc info is bad in this case).
	 */

	if (unlikely( ((start & (PAGE_SIZE - 1)) != 0)
	    || ((pages4k & ((1UL << PDC_PAGE_ADJ_SHIFT) - 1)) != 0) )) {

		panic("Memory range doesn't align with page size!\n");
	}

	pmem_ptr->start_pfn = (start >> PAGE_SHIFT);
	pmem_ptr->pages = (pages4k >> PDC_PAGE_ADJ_SHIFT);
}

static void __init pagezero_memconfig(void)
{
	unsigned long npages;

	/* Use the 32 bit information from page zero to create a single
	 * entry in the pmem_ranges[] table.
	 *
	 * We currently don't support machines with contiguous memory
	 * >= 4 GB, which report that memory using 64-bit-only fields
	 * on page zero. It's not worth doing until it can be tested,
	 * and it is not clear we can support those machines for other
	 * reasons.
	 *
	 * If that support is done in the future, this is where it
	 * should be done.
	 */

	npages = (PAGE_ALIGN(PAGE0->imm_max_mem) >> PAGE_SHIFT);
	set_pmem_entry(pmem_ranges,0UL,npages);
	npmem_ranges = 1;
}

#ifdef CONFIG_64BIT

/* All of the PDC PAT specific code is 64-bit only */

/*
**  The module object is filled via PDC_PAT_CELL[Return Cell Module].
**  If a module is found, its IODC bytes are read via pdc_iodc_read(),
**  using the PA view of conf_base_addr as the hpa parameter.
**
**  The IO view can be used by PDC_PAT_CELL[Return Cell Module]
**  only for SBAs and LBAs.  This view will cause an invalid
**  argument error for all other cell module types.
**
*/

static int __init
pat_query_module(ulong pcell_loc, ulong mod_index)
{
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;
	unsigned long bytecnt;
	unsigned long temp;	/* 64-bit scratch value */
	long status;		/* PDC return value status */
	struct parisc_device *dev;

	pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL);
	if (!pa_pdc_cell)
		panic("couldn't allocate memory for PDC_PAT_CELL!");

	/* return cell module (PA or Processor view) */
	status = pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				     PA_VIEW, pa_pdc_cell);

	if (status != PDC_OK) {
		/* no more cell modules or error */
		kfree(pa_pdc_cell);
		return status;
	}

	temp = pa_pdc_cell->cba;
	dev = alloc_pa_dev(PAT_GET_CBA(temp), &(pa_pdc_cell->mod_path));
	if (!dev) {
		kfree(pa_pdc_cell);
		return PDC_OK;
	}

	/* alloc_pa_dev sets dev->hpa */

	/*
	** save parameters in the parisc_device
	** (The idea being the device driver will call pdc_pat_cell_module()
	** and store the results in its own data structure.)
	*/
	dev->pcell_loc = pcell_loc;
	dev->mod_index = mod_index;

	/* save generic info returned from the call */
	/* REVISIT: who is the consumer of this? not sure yet... */
	dev->mod_info = pa_pdc_cell->mod_info;	/* pass to PAT_GET_ENTITY() */
	dev->pmod_loc = pa_pdc_cell->mod_location;
	dev->mod0 = pa_pdc_cell->mod[0];

	register_parisc_device(dev);	/* advertise device */

#ifdef DEBUG_PAT
	/* dump what we see so far... */
	switch (PAT_GET_ENTITY(dev->mod_info)) {
		pdc_pat_cell_mod_maddr_block_t io_pdc_cell;
		unsigned long i;

	case PAT_ENTITY_PROC:
		printk(KERN_DEBUG "PAT_ENTITY_PROC: id_eid 0x%lx\n",
			pa_pdc_cell->mod[0]);
		break;

	case PAT_ENTITY_MEM:
		printk(KERN_DEBUG
			"PAT_ENTITY_MEM: amount 0x%lx min_gni_base 0x%lx min_gni_len 0x%lx\n",
			pa_pdc_cell->mod[0], pa_pdc_cell->mod[1],
			pa_pdc_cell->mod[2]);
		break;
	case PAT_ENTITY_CA:
		printk(KERN_DEBUG "PAT_ENTITY_CA: %ld\n", pcell_loc);
		break;

	case PAT_ENTITY_PBC:
		printk(KERN_DEBUG "PAT_ENTITY_PBC: ");
		goto print_ranges;

	case PAT_ENTITY_SBA:
		printk(KERN_DEBUG "PAT_ENTITY_SBA: ");
		goto print_ranges;

	case PAT_ENTITY_LBA:
		printk(KERN_DEBUG "PAT_ENTITY_LBA: ");

 print_ranges:
		pdc_pat_cell_module(&bytecnt, pcell_loc, mod_index,
				    IO_VIEW, &io_pdc_cell);
		printk(KERN_DEBUG "ranges %ld\n", pa_pdc_cell->mod[1]);
		for (i = 0; i < pa_pdc_cell->mod[1]; i++) {
			printk(KERN_DEBUG
				"  PA_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, pa_pdc_cell->mod[2 + i * 3],	/* type */
				pa_pdc_cell->mod[3 + i * 3],	/* start */
				pa_pdc_cell->mod[4 + i * 3]);	/* finish (ie end) */
			printk(KERN_DEBUG
				"  IO_VIEW %ld: 0x%016lx 0x%016lx 0x%016lx\n",
				i, io_pdc_cell.mod[2 + i * 3],	/* type */
				io_pdc_cell.mod[3 + i * 3],	/* start */
				io_pdc_cell.mod[4 + i * 3]);	/* finish (ie end) */
		}
		printk(KERN_DEBUG "\n");
		break;
	}
#endif /* DEBUG_PAT */

	kfree(pa_pdc_cell);

	return PDC_OK;
}


/* pat pdc can return information about a variety of different
 * types of memory (e.g. firmware, I/O, etc.) but we only care about
 * the usable physical ram right now. Since the firmware specific
 * information is allocated on the stack, we'll be generous, in
 * case there is a lot of other information we don't care about.
 */

#define PAT_MAX_RANGES (4 * MAX_PHYSMEM_RANGES)

static void __init pat_memconfig(void)
{
	unsigned long actual_len;
	struct pdc_pat_pd_addr_map_entry mem_table[PAT_MAX_RANGES+1];
	struct pdc_pat_pd_addr_map_entry *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	unsigned long length;
	int i;

	length = (PAT_MAX_RANGES + 1) * sizeof(struct pdc_pat_pd_addr_map_entry);

	status = pdc_pat_pd_get_addr_map(&actual_len, mem_table, length, 0L);

	if ((status != PDC_OK)
	    || ((actual_len % sizeof(struct pdc_pat_pd_addr_map_entry)) != 0)) {

		/* The above pdc call shouldn't fail, but, just in
		 * case, just use the PAGE0 info.
		 */

		printk("\n\n\n");
		printk(KERN_WARNING "WARNING! Could not get full memory configuration. "
			"All memory may not be used!\n\n\n");
		pagezero_memconfig();
		return;
	}

	entries = actual_len / sizeof(struct pdc_pat_pd_addr_map_entry);

	if (entries > PAT_MAX_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
	}

	/* Copy information into the firmware independent pmem_ranges
	 * array, skipping types we don't care about. Notice we said
	 * "may" above. We'll use all the entries that were returned.
	 */

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		if (   (mtbl_ptr->entry_type != PAT_MEMORY_DESCRIPTOR)
		    || (mtbl_ptr->memory_type != PAT_MEMTYPE_MEMORY)
		    || (mtbl_ptr->pages == 0)
		    || (   (mtbl_ptr->memory_usage != PAT_MEMUSE_GENERAL)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GI)
			&& (mtbl_ptr->memory_usage != PAT_MEMUSE_GNI) ) ) {

			continue;
		}

		if (npmem_ranges == MAX_PHYSMEM_RANGES) {
			printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
			printk(KERN_WARNING "Some memory will not be used!\n");
			break;
		}

		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

static int __init pat_inventory(void)
{
	int status;
	ulong mod_index = 0;
	struct pdc_pat_cell_num cell_info;

	/*
	** Note:  Prelude (and its successors: Lclass, A400/500) only
	**        implement PDC_PAT_CELL sub-options 0 and 2.
	*/
	status = pdc_pat_cell_get_number(&cell_info);
	if (status != PDC_OK) {
		return 0;
	}

#ifdef DEBUG_PAT
	printk(KERN_DEBUG "CELL_GET_NUMBER: 0x%lx 0x%lx\n", cell_info.cell_num,
	       cell_info.cell_loc);
#endif

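	/*
	 * Walk this cell's module list; pat_query_module() registers each
	 * module it finds and returns a non-OK status once the firmware
	 * has no more entries.  The number of modules found is returned.
	 */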
	while (PDC_OK == pat_query_module(cell_info.cell_loc, mod_index)) {
		mod_index++;
	}

	return mod_index;
}

/* We only look for extended memory ranges on a 64 bit capable box */
static void __init sprockets_memconfig(void)
{
	struct pdc_memory_table_raddr r_addr;
	struct pdc_memory_table mem_table[MAX_PHYSMEM_RANGES];
	struct pdc_memory_table *mtbl_ptr;
	physmem_range_t *pmem_ptr;
	long status;
	int entries;
	int i;

	status = pdc_mem_mem_table(&r_addr,mem_table,
				(unsigned long)MAX_PHYSMEM_RANGES);

	if (status != PDC_OK) {

		/* The above pdc call only works on boxes with sprockets
		 * firmware (newer B,C,J class). Other non PAT PDC machines
		 * do support more than 3.75 Gb of memory, but we don't
		 * support them yet.
		 */

		pagezero_memconfig();
		return;
	}

	if (r_addr.entries_total > MAX_PHYSMEM_RANGES) {
		printk(KERN_WARNING "This Machine has more memory ranges than we support!\n");
		printk(KERN_WARNING "Some memory will not be used!\n");
	}

	entries = (int)r_addr.entries_returned;

	npmem_ranges = 0;
	mtbl_ptr = mem_table;
	pmem_ptr = pmem_ranges; /* Global firmware independent table */
	for (i = 0; i < entries; i++,mtbl_ptr++) {
		set_pmem_entry(pmem_ptr++,mtbl_ptr->paddr,mtbl_ptr->pages);
		npmem_ranges++;
	}
}

#else   /* !CONFIG_64BIT */

#define pat_inventory() do { } while (0)
#define pat_memconfig() do { } while (0)
#define sprockets_memconfig() pagezero_memconfig()

#endif	/* !CONFIG_64BIT */


#ifndef CONFIG_PA20

/* Code to support Snake machines (7[2350], 7[235]5, 715/Scorpio) */

static struct parisc_device * __init
legacy_create_device(struct pdc_memory_map *r_addr,
		struct pdc_module_path *module_path)
{
	struct parisc_device *dev;
	int status = pdc_mem_map_hpa(r_addr, module_path);
	if (status != PDC_OK)
		return NULL;

	dev = alloc_pa_dev(r_addr->hpa, &module_path->path);
	if (dev == NULL)
		return NULL;

	register_parisc_device(dev);
	return dev;
}

/**
 * snake_inventory
 *
 * Before PDC_SYSTEM_MAP was invented, the PDC_MEM_MAP call was used.
 * To use it, we initialise the mod_path.bc to 0xff and try all values of
 * mod to get the HPA for the top-level devices.  Bus adapters may have
 * sub-devices which are discovered by setting bc[5] to 0 and bc[4] to the
 * module, then trying all possible functions.
 */
static void __init snake_inventory(void)
{
	int mod;
	for (mod = 0; mod < 16; mod++) {
		struct parisc_device *dev;
		struct pdc_module_path module_path;
		struct pdc_memory_map r_addr;
		unsigned int func;

		memset(module_path.path.bc, 0xff, 6);
		module_path.path.mod = mod;
		dev = legacy_create_device(&r_addr, &module_path);
		if ((!dev) || (dev->id.hw_type != HPHW_BA))
			continue;

		memset(module_path.path.bc, 0xff, 4);
		module_path.path.bc[4] = mod;

		for (func = 0; func < 16; func++) {
			module_path.path.bc[5] = 0;
			module_path.path.mod = func;
			legacy_create_device(&r_addr, &module_path);
		}
	}
}

#else /* CONFIG_PA20 */
#define snake_inventory() do { } while (0)
#endif  /* CONFIG_PA20 */

/* Common 32/64 bit based code goes here */

/**
 * add_system_map_addresses - Add additional addresses to the parisc device.
 * @dev: The parisc device.
 * @num_addrs: The number of addresses to add.
 * @module_instance: The system_map module instance.
 *
 * This function adds any additional addresses reported by the system_map
 * firmware to the parisc device.
 */
static void __init
add_system_map_addresses(struct parisc_device *dev, int num_addrs,
			 int module_instance)
{
	int i;
	long status;
	struct pdc_system_map_addr_info addr_result;

	dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL);
	if(!dev->addr) {
		printk(KERN_ERR "%s %s(): memory allocation failure\n",
		       __FILE__, __func__);
		return;
	}

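	/*
	 * Additional addresses are fetched with 1-based indices, hence the
	 * loop below runs from 1 to num_addrs inclusive.
	 */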
	for(i = 1; i <= num_addrs; ++i) {
		status = pdc_system_map_find_addrs(&addr_result,
						   module_instance, i);
		if(PDC_OK == status) {
			dev->addr[dev->num_addrs] = (unsigned long)addr_result.mod_addr;
			dev->num_addrs++;
		} else {
			printk(KERN_WARNING
			       "Bad PDC_FIND_ADDRESS status return (%ld) for index %d\n",
			       status, i);
		}
	}
}

/**
 * system_map_inventory - Retrieve firmware devices via SYSTEM_MAP.
 *
 * This function attempts to retrieve and register all the devices firmware
 * knows about via the SYSTEM_MAP PDC call.
 */
static void __init system_map_inventory(void)
{
	int i;
	long status = PDC_OK;

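	/*
	 * Probe up to 256 module indices.  PDC_BAD_PROC or PDC_NE_MOD means
	 * the firmware has nothing more to report, so stop; any other
	 * non-OK status just skips that index.
	 */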
	for (i = 0; i < 256; i++) {
		struct parisc_device *dev;
		struct pdc_system_map_mod_info module_result;
		struct pdc_module_path module_path;

		status = pdc_system_map_find_mods(&module_result,
				&module_path, i);
		if ((status == PDC_BAD_PROC) || (status == PDC_NE_MOD))
			break;
		if (status != PDC_OK)
			continue;

		dev = alloc_pa_dev(module_result.mod_addr, &module_path.path);
		if (!dev)
			continue;

		register_parisc_device(dev);

		/* if available, get the additional addresses for a module */
		if (!module_result.add_addrs)
			continue;

		add_system_map_addresses(dev, module_result.add_addrs, i);
	}

	walk_central_bus();
	return;
}

void __init do_memory_inventory(void)
{
	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_memconfig();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		sprockets_memconfig();
		break;

	case PDC_TYPE_SNAKE:
		pagezero_memconfig();
		return;

	default:
		panic("Unknown PDC type!\n");
	}

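	/*
	 * Sanity check: the memory setup code expects the first range to
	 * start at physical address 0.  If the firmware tables disagree
	 * (or returned nothing), fall back to the PAGE0 description.
	 */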
	if (npmem_ranges == 0 || pmem_ranges[0].start_pfn != 0) {
		printk(KERN_WARNING "Bad memory configuration returned!\n");
		printk(KERN_WARNING "Some memory may not be used!\n");
		pagezero_memconfig();
	}
}

void __init do_device_inventory(void)
{
	printk(KERN_INFO "Searching for devices...\n");

	init_parisc_bus();

	switch (pdc_type) {

	case PDC_TYPE_PAT:
		pat_inventory();
		break;

	case PDC_TYPE_SYSTEM_MAP:
		system_map_inventory();
		break;

	case PDC_TYPE_SNAKE:
		snake_inventory();
		break;

	default:
		panic("Unknown PDC type!\n");
	}
	printk(KERN_INFO "Found devices:\n");
	print_parisc_devices();

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	pa_serialize_tlb_flushes = machine_has_merced_bus();
	if (pa_serialize_tlb_flushes)
		pr_info("Merced bus found: Enable PxTLB serialization.\n");
#endif
}