/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/root_dev.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h>
#include <linux/of.h>
#include <linux/cache.h>
#include <asm/sections.h>
#include <asm/arcregs.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <asm/unwind.h>
#include <asm/mach_desc.h>
#include <asm/smp.h>

/*
 * Optimization barrier: the empty asm marks @x as read-write, so GCC will
 * not cache or fold its value across this point.
 */
#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))

unsigned int intr_to_DE_cnt;

/* Part of U-boot ABI: see head.S */
int __initdata uboot_tag;
char __initdata *uboot_arg;

const struct machine_desc *machine_desc;

struct task_struct *_current_task[NR_CPUS];	/* For stack switching */

struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];

static const struct id_to_str arc_cpu_rel[] = {
#ifdef CONFIG_ISA_ARCOMPACT
	{ 0x34, "R4.10" },
	{ 0x35, "R4.11" },
#else
	{ 0x51, "R2.0" },
	{ 0x52, "R2.1" },
	{ 0x53, "R3.0" },
	{ 0x54, "R3.10a" },
#endif
	{ 0x00, NULL }
};

static const struct id_to_str arc_cpu_nm[] = {
#ifdef CONFIG_ISA_ARCOMPACT
	{ 0x20, "ARC 600" },
	{ 0x30, "ARC 770" },	/* 750 identified separately */
#else
	{ 0x40, "ARC EM" },
	{ 0x50, "ARC HS38" },
	{ 0x54, "ARC HS48" },
#endif
	{ 0x00, "Unknown" }
};

static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
{
	if (is_isa_arcompact()) {
		struct bcr_iccm_arcompact iccm;
		struct bcr_dccm_arcompact dccm;

		READ_BCR(ARC_REG_ICCM_BUILD, iccm);
		if (iccm.ver) {
			cpu->iccm.sz = 4096 << iccm.sz;	/* 8K to 512K */
			cpu->iccm.base_addr = iccm.base << 16;
		}

		READ_BCR(ARC_REG_DCCM_BUILD, dccm);
		if (dccm.ver) {
			unsigned long base;

			cpu->dccm.sz = 2048 << dccm.sz;	/* 2K to 256K */

			base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
			cpu->dccm.base_addr = base & ~0xF;
		}
	} else {
		struct bcr_iccm_arcv2 iccm;
		struct bcr_dccm_arcv2 dccm;
		unsigned long region;

		READ_BCR(ARC_REG_ICCM_BUILD, iccm);
		if (iccm.ver) {
			cpu->iccm.sz = 256 << iccm.sz00;	/* 512B to 16M */
			if (iccm.sz00 == 0xF && iccm.sz01 > 0)
				cpu->iccm.sz <<= iccm.sz01;

			region = read_aux_reg(ARC_REG_AUX_ICCM);
			cpu->iccm.base_addr = region & 0xF0000000;
		}

		READ_BCR(ARC_REG_DCCM_BUILD, dccm);
		if (dccm.ver) {
			cpu->dccm.sz = 256 << dccm.sz0;
			if (dccm.sz0 == 0xF && dccm.sz1 > 0)
				cpu->dccm.sz <<= dccm.sz1;

			region = read_aux_reg(ARC_REG_AUX_DCCM);
			cpu->dccm.base_addr = region & 0xF0000000;
		}
	}
}
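
/*
 * Probe the Build Configuration Registers of the calling core and fill in
 * its cpuinfo_arc700[] slot: identity/release, timers, MMU, caches, CCMs,
 * FPU, branch predictor and ISA feature flags.
 */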
static void read_arc_build_cfg_regs(void)
{
	struct bcr_timer timer;
	struct bcr_generic bcr;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
	const struct id_to_str *tbl;
	struct bcr_isa_arcv2 isa;

	FIX_PTR(cpu);

	READ_BCR(AUX_IDENTITY, cpu->core);

	for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
		if (cpu->core.family == tbl->id) {
			cpu->details = tbl->str;
			break;
		}
	}

	for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
		if ((cpu->core.family & 0xF4) == tbl->id)
			break;
	}
	cpu->name = tbl->str;

	READ_BCR(ARC_REG_TIMERS_BCR, timer);
	cpu->extn.timer0 = timer.t0;
	cpu->extn.timer1 = timer.t1;
	cpu->extn.rtc = timer.rtc;

	cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);

	READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);

	cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0;	/* 2,3 */
	cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0;	/* 2,3 */
	cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0;	/* 1,3 */
	cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
	cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0;	/* 2 */
	cpu->extn.swape = (cpu->core.family >= 0x34) ? 1 :
				IS_ENABLED(CONFIG_ARC_HAS_SWAPE);

	READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);

	/* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
	read_decode_ccm_bcr(cpu);

	read_decode_mmu_bcr();
	read_decode_cache_bcr();

	if (is_isa_arcompact()) {
		struct bcr_fp_arcompact sp, dp;
		struct bcr_bpu_arcompact bpu;

		READ_BCR(ARC_REG_FP_BCR, sp);
		READ_BCR(ARC_REG_DPFP_BCR, dp);
		cpu->extn.fpu_sp = sp.ver ? 1 : 0;
		cpu->extn.fpu_dp = dp.ver ? 1 : 0;

		READ_BCR(ARC_REG_BPU_BCR, bpu);
		cpu->bpu.ver = bpu.ver;
		cpu->bpu.full = bpu.fam ? 1 : 0;
		if (bpu.ent) {
			cpu->bpu.num_cache = 256 << (bpu.ent - 1);
			cpu->bpu.num_pred = 256 << (bpu.ent - 1);
		}
	} else {
		struct bcr_fp_arcv2 spdp;
		struct bcr_bpu_arcv2 bpu;

		READ_BCR(ARC_REG_FP_V2_BCR, spdp);
		cpu->extn.fpu_sp = spdp.sp ? 1 : 0;
		cpu->extn.fpu_dp = spdp.dp ? 1 : 0;

		READ_BCR(ARC_REG_BPU_BCR, bpu);
		cpu->bpu.ver = bpu.ver;
		cpu->bpu.full = bpu.ft;
		cpu->bpu.num_cache = 256 << bpu.bce;
		cpu->bpu.num_pred = 2048 << bpu.pte;

		if (cpu->core.family >= 0x54) {
			unsigned int exec_ctrl;

			READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
			cpu->extn.dual_enb = !(exec_ctrl & 1);

			/* dual issue always present for this core */
			cpu->extn.dual = 1;
		}
	}

	READ_BCR(ARC_REG_AP_BCR, bcr);
	cpu->extn.ap = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_SMART_BCR, bcr);
	cpu->extn.smart = bcr.ver ? 1 : 0;

	READ_BCR(ARC_REG_RTT_BCR, bcr);
	cpu->extn.rtt = bcr.ver ? 1 : 0;

	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

	/* some hacks for lack of feature BCR info in old ARC700 cores */
	if (is_isa_arcompact()) {
		if (!isa.ver)	/* ISA BCR absent, use Kconfig info */
			cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
		else {
			/* ARC700_BUILD only has 2 bits of isa info */
			struct bcr_generic bcr = *(struct bcr_generic *)&isa;

			cpu->isa.atomic = bcr.info & 1;
		}

		cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);

		/* there's no direct way to distinguish 750 vs. 770 */
		if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
			cpu->name = "ARC750";
	} else {
		cpu->isa = isa;
	}
}
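
/*
 * Format the per-CPU summary (identity, timers, ISA extensions, BPU, loop
 * buffer) into @buf; shared by the boot banner and /proc/cpuinfo.
 */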
static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
	struct bcr_identity *core = &cpu->core;
	int i, n = 0;

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n,
		       "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
		       core->family, core->cpu_id, core->chip_id);

	n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
		       cpu_id, cpu->name, cpu->details,
		       is_isa_arcompact() ? "ARCompact" : "ARCv2",
		       IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
		       IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));

	n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
		       IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
		       IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
		       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
		       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));

	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
			   IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
			   IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
			   IS_AVAIL1(cpu->isa.unalign, "unalign (not used)"));

	if (i)
		n += scnprintf(buf + n, len - n, "\n\t\t: ");

	if (cpu->extn_mpy.ver) {
		if (cpu->extn_mpy.ver <= 0x2) {	/* ARCompact */
			n += scnprintf(buf + n, len - n, "mpy ");
		} else {
			int opt = 2;	/* stock MPY/MPYH */

			if (cpu->extn_mpy.dsp)	/* OPT 7-9 */
				opt = cpu->extn_mpy.dsp + 6;

			n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
		}
	}

	n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
		       IS_AVAIL1(cpu->isa.div_rem, "div_rem "),
		       IS_AVAIL1(cpu->extn.norm, "norm "),
		       IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
		       IS_AVAIL1(cpu->extn.swap, "swap "),
		       IS_AVAIL1(cpu->extn.minmax, "minmax "),
		       IS_AVAIL1(cpu->extn.crc, "crc "),
		       IS_AVAIL2(cpu->extn.swape, "swape", CONFIG_ARC_HAS_SWAPE));

	if (cpu->bpu.ver)
		n += scnprintf(buf + n, len - n,
			       "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
			       IS_AVAIL1(cpu->bpu.full, "full"),
			       IS_AVAIL1(!cpu->bpu.full, "partial"),
			       cpu->bpu.num_cache, cpu->bpu.num_pred);

	if (is_isa_arcv2()) {
		struct bcr_lpb lpb;

		READ_BCR(ARC_REG_LPB_BUILD, lpb);
		if (lpb.ver) {
			unsigned int ctl;

			ctl = read_aux_reg(ARC_REG_LPB_CTRL);

			n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
				       lpb.entries,
				       IS_DISABLED_RUN(!ctl));
		}
	}

	n += scnprintf(buf + n, len - n, "\n");
	return buf;
}
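
/*
 * Format the optional-extension summary (vector table base, FPU, debug
 * facilities, CCMs, ECC, OS ABI) into @buf.
 */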
static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];

	FIX_PTR(cpu);

	n += scnprintf(buf + n, len - n, "Vector Table\t: %#x\n", cpu->vec_base);

	if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
		n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
			       IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
			       IS_AVAIL1(cpu->extn.fpu_dp, "DP "));

	if (cpu->extn.debug)
		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
			       IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
			       IS_AVAIL1(cpu->extn.rtt, "RTT "));

	if (cpu->dccm.sz || cpu->iccm.sz)
		n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
			       cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
			       cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));

	if (is_isa_arcv2()) {

		/* Error Protection: ECC/Parity */
		struct bcr_erp erp;

		READ_BCR(ARC_REG_ERP_BUILD, erp);

		if (erp.ver) {
			struct ctl_erp ctl;

			READ_BCR(ARC_REG_ERP_CTRL, ctl);

			/* inverted bits: 0 means enabled */
			n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
				       IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
				       IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
				       IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
		}
	}

	n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
		       EF_ARC_OSABI_CURRENT >> 8,
		       EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
		       "no-legacy-syscalls" : "64-bit data any register aligned");

	return buf;
}

static void arc_chk_core_config(void)
{
	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
	int saved = 0, present = 0;
	char *opt_nm = NULL;

	if (!cpu->extn.timer0)
		panic("Timer0 is not present!\n");

	if (!cpu->extn.timer1)
		panic("Timer1 is not present!\n");

#ifdef CONFIG_ARC_HAS_DCCM
	/*
	 * DCCM can be arbitrarily placed in hardware.
	 * Make sure its placement/size matches what Linux is built with.
	 */
	if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
		panic("Linux built with incorrect DCCM Base address\n");

	if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
		panic("Linux built with incorrect DCCM Size\n");
#endif

#ifdef CONFIG_ARC_HAS_ICCM
	if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
		panic("Linux built with incorrect ICCM Size\n");
#endif

	/*
	 * FP hardware/software config sanity
	 * -If hardware present, kernel needs to save/restore FPU state
	 * -If not, it will crash trying to save/restore the non-existent regs
	 */

	if (is_isa_arcompact()) {
		opt_nm = "CONFIG_ARC_FPU_SAVE_RESTORE";
		saved = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);

		/* only DPDP checked since SP has no arch visible regs */
		present = cpu->extn.fpu_dp;
	} else {
		opt_nm = "CONFIG_ARC_HAS_ACCL_REGS";
		saved = IS_ENABLED(CONFIG_ARC_HAS_ACCL_REGS);

		/* Accumulator Low:High pair (r58:59) present if DSP MPY or FPU */
		present = cpu->extn_mpy.dsp | cpu->extn.fpu_sp | cpu->extn.fpu_dp;
	}

	if (present && !saved)
		pr_warn("Enable %s for working apps\n", opt_nm);
	else if (!present && saved)
		panic("Disable %s, hardware NOT present\n", opt_nm);
}

/*
 * Initialize and set up the processor core.
 * This is called by all CPUs, so it must not do anything specific to the
 * boot CPU only.
 */
void setup_processor(void)
{
	char str[512];
	int cpu_id = smp_processor_id();

	read_arc_build_cfg_regs();
	arc_init_IRQ();

	pr_info("%s", arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));

	arc_mmu_init();
	arc_cache_init();

	pr_info("%s", arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
	pr_info("%s", arc_platform_smp_cpuinfo());

	arc_chk_core_config();
}

static inline int is_kernel(unsigned long addr)
{
	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
		return 1;
	return 0;
}
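
/*
 * Arch-level boot setup: pick the device tree (u-boot provided or embedded),
 * assemble the command line, then bring up the CPU, memory and the unwinder.
 */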
void __init setup_arch(char **cmdline_p)
{
#ifdef CONFIG_ARC_UBOOT_SUPPORT
	/* make sure that the u-boot-passed pointer to cmdline/dtb is valid */
	if (uboot_tag && is_kernel((unsigned long)uboot_arg))
		panic("Invalid uboot arg\n");

	/* See if u-boot passed an external Device Tree blob */
	machine_desc = setup_machine_fdt(uboot_arg);	/* uboot_tag == 2 */
	if (!machine_desc)
#endif
	{
		/* No, so try the embedded one */
		machine_desc = setup_machine_fdt(__dtb_start);
		if (!machine_desc)
			panic("Embedded DT invalid\n");

		/*
		 * If we are here, it is established that @uboot_arg didn't
		 * point to a DT blob. Instead, if u-boot says it is a cmdline,
		 * append it to the embedded DT cmdline.
		 * setup_machine_fdt() would have populated @boot_command_line.
		 */
		if (uboot_tag == 1) {
			/* Ensure a whitespace between the 2 cmdlines */
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
			strlcat(boot_command_line, uboot_arg,
				COMMAND_LINE_SIZE);
		}
	}

	/* Save unparsed command line copy for /proc/cmdline */
	*cmdline_p = boot_command_line;

	/* To force early parsing of things like mem=xxx */
	parse_early_param();

	/* Platform/board specific: e.g. early console registration */
	if (machine_desc->init_early)
		machine_desc->init_early();

	smp_init_cpus();

	setup_processor();
	setup_arch_memory();

	/* copy flat DT out of .init and then unflatten it */
	unflatten_and_copy_device_tree();

	/*
	 * Can be an issue if someone passes the cmdline arg "ro",
	 * but that is unlikely, so keep it as is.
	 */
	root_mountflags &= ~MS_RDONLY;

#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif

	arc_unwind_init();
}

/*
 * Called from start_kernel() - boot CPU only
 */
void __init time_init(void)
{
	of_clk_init(NULL);
	timer_probe();
}

static int __init customize_machine(void)
{
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_late_machine(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();

	return 0;
}
late_initcall(init_late_machine);

/*
 * Get CPU information for use by procfs.
 */
#define cpu_to_ptr(c)	((void *)(0xFFFF0000 | (unsigned int)(c)))
#define ptr_to_cpu(p)	(~0xFFFF0000UL & (unsigned int)(p))

static int show_cpuinfo(struct seq_file *m, void *v)
{
	char *str;
	int cpu_id = ptr_to_cpu(v);
	struct device *cpu_dev = get_cpu_device(cpu_id);
	struct clk *cpu_clk;
	unsigned long freq = 0;

	if (!cpu_online(cpu_id)) {
		seq_printf(m, "processor [%d]\t: Offline\n", cpu_id);
		goto done;
	}

	str = (char *)__get_free_page(GFP_KERNEL);
	if (!str)
		goto done;

	seq_printf(m, "%s", arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));

	cpu_clk = clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk)) {
		seq_printf(m, "CPU speed\t: Cannot get clock for processor [%d]\n",
			   cpu_id);
	} else {
		freq = clk_get_rate(cpu_clk);
	}
	if (freq)
		seq_printf(m, "CPU speed\t: %lu.%02lu MHz\n",
			   freq / 1000000, (freq / 10000) % 100);

	seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100);

	seq_printf(m, "%s", arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
	seq_printf(m, "%s", arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
	seq_printf(m, "%s", arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
	seq_printf(m, "%s", arc_platform_smp_cpuinfo());

	free_page((unsigned long)str);
done:
	seq_printf(m, "\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	/*
	 * Callback returns cpu-id to iterator for show routine, NULL to stop.
	 * However since NULL is also a valid cpu-id (0), we use a round-about
	 * way to pass it w/o having to kmalloc/free a 2 byte string.
	 * Encode cpu-id as 0xFFFFcccc (see cpu_to_ptr above), which is decoded
	 * by the show routine.
	 */
	return *pos < nr_cpu_ids ? cpu_to_ptr(*pos) : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo
};

static DEFINE_PER_CPU(struct cpu, cpu_topology);

static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&per_cpu(cpu_topology, cpu), cpu);

	return 0;
}
subsys_initcall(topology_init);