Lines Matching +full:cpu +full:- +full:nr
1 // SPDX-License-Identifier: GPL-2.0
28 #include <linux/cpu.h>
63 { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
66 [0 ... NR_CPUS-1] = CPU_MASK_NONE };
69 [0 ... NR_CPUS - 1] = CPU_MASK_NONE };
87 seq_printf(m, "CPU%d:\t\tonline\n", i); in smp_info()
96 "Cpu%dClkTck\t: %016lx\n", in smp_bogo()
127 current_thread_info()->new_child = 0; in smp_callin()
131 current->active_mm = &init_mm; in smp_callin()
133 /* inform the notifiers about the new cpu */ in smp_callin()
148 printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id()); in cpu_panic()
156  * initiates the synchronization instead of the slave. -DaveM
177 t0 = tick_ops->get_tick(); in get_delta()
184 t1 = tick_ops->get_tick(); in get_delta()
186 if (t1 - t0 < best_t1 - best_t0) in get_delta()
190 *rt = best_t1 - best_t0; in get_delta()
191 *master = best_tm - best_t0; in get_delta()
197 return tcenter - best_tm; in get_delta()
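The get_delta() fragments above show the core of the measurement: sample the local tick, read the master's tick, sample the local tick again, keep the attempt with the smallest round trip, and assume the master's reading belongs to the centre of that interval. As a rough, self-contained illustration of the same idea (read_local_tick() and read_master_tick() are invented stand-ins for tick_ops->get_tick() and the master/slave mailbox, not kernel interfaces):

#include <stdint.h>
#include <time.h>

#define NUM_ROUNDS 64

/* Illustrative stand-ins only: here both just read a monotonic clock. */
static uint64_t read_local_tick(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t read_master_tick(void)
{
	return read_local_tick();
}

int64_t estimate_tick_delta(uint64_t *rt, uint64_t *master_t)
{
	uint64_t best_t0 = 0, best_t1 = UINT64_MAX, best_tm = 0;
	uint64_t tcenter;
	int i;

	for (i = 0; i < NUM_ROUNDS; i++) {
		uint64_t t0 = read_local_tick();
		uint64_t tm = read_master_tick();
		uint64_t t1 = read_local_tick();

		/* Keep the sample with the smallest round trip. */
		if (t1 - t0 < best_t1 - best_t0) {
			best_t0 = t0;
			best_t1 = t1;
			best_tm = tm;
		}
	}

	*rt = best_t1 - best_t0;	/* best round-trip time      */
	*master_t = best_tm - best_t0;	/* master reading within it  */

	/* Assume the master's reading sits at the centre of the best
	 * round trip, as get_delta() does. */
	tcenter = best_t0 / 2 + best_t1 / 2;
	if ((best_t0 & 1) + (best_t1 & 1) == 2)
		tcenter++;

	return (int64_t)(tcenter - best_tm);
}

smp_synchronize_tick_client() then feeds such a delta into tick_ops->add_tick(), smoothing it with the adjust_latency term seen in the fragments below.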
227 adjust_latency += -delta; in smp_synchronize_tick_client()
228 adj = -delta + adjust_latency/4; in smp_synchronize_tick_client()
230 adj = -delta; in smp_synchronize_tick_client()
232 tick_ops->add_tick(adj); in smp_synchronize_tick_client()
250 printk(KERN_INFO "CPU %d: synchronized TICK with master CPU " in smp_synchronize_tick_client()
255 static void smp_start_sync_tick_client(int cpu);
257 static void smp_synchronize_one_tick(int cpu) in smp_synchronize_one_tick() argument
263 smp_start_sync_tick_client(cpu); in smp_synchronize_one_tick()
280 go[SLAVE] = tick_ops->get_tick(); in smp_synchronize_one_tick()
288 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, in ldom_startcpu_cpuid() argument
302 num_kernel_image_mappings - 1), in ldom_startcpu_cpuid()
311 hdesc->cpu = cpu; in ldom_startcpu_cpuid()
312 hdesc->num_mappings = num_kernel_image_mappings; in ldom_startcpu_cpuid()
314 tb = &trap_block[cpu]; in ldom_startcpu_cpuid()
316 hdesc->fault_info_va = (unsigned long) &tb->fault_info; in ldom_startcpu_cpuid()
317 hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info); in ldom_startcpu_cpuid()
319 hdesc->thread_reg = thread_reg; in ldom_startcpu_cpuid()
324 for (i = 0; i < hdesc->num_mappings; i++) { in ldom_startcpu_cpuid()
325 hdesc->maps[i].vaddr = tte_vaddr; in ldom_startcpu_cpuid()
326 hdesc->maps[i].tte = tte_data; in ldom_startcpu_cpuid()
333 hv_err = sun4v_cpu_start(cpu, trampoline_ra, in ldom_startcpu_cpuid()
344 /* The OBP cpu startup callback truncates the 3rd arg cookie to
345 * 32-bits (I think) so to be safe we have it read the pointer
346 * contained here so we work on >4GB machines. -DaveM
350 static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) in smp_boot_one_cpu() argument
365 ldom_startcpu_cpuid(cpu, in smp_boot_one_cpu()
370 prom_startcpu_cpuid(cpu, entry, cookie); in smp_boot_one_cpu()
372 struct device_node *dp = of_find_node_by_cpuid(cpu); in smp_boot_one_cpu()
374 prom_startcpu(dp->phandle, entry, cookie); in smp_boot_one_cpu()
386 printk("Processor %d is stuck.\n", cpu); in smp_boot_one_cpu()
387 ret = -ENODEV; in smp_boot_one_cpu()
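The comment above smp_boot_one_cpu() describes an indirection workaround: rather than trusting the firmware callback to carry a full 64-bit value, the kernel publishes the value in a static slot inside the kernel image (the cpu_new_thread pointer in this file) and passes the slot's address as the cookie, which the freshly started cpu then dereferences. A toy user-space illustration of that pattern follows; fake_obp_startcpu() and fake_trampoline() are made-up names, and the 32-bit truncation itself is not modelled:

#include <stdint.h>
#include <stdio.h>

/* Stands in for cpu_new_thread: the real 64-bit value lives here,
 * at an address the callee can always reach. */
static uint64_t new_thread_slot;

/* Made-up stand-in for the firmware start-cpu callback, which may
 * truncate the cookie it carries; here it simply forwards it. */
static void fake_obp_startcpu(void (*entry)(uint64_t), uint64_t cookie)
{
	entry(cookie);
}

/* Made-up stand-in for the startup trampoline: the new cpu loads the
 * full 64-bit value through the slot instead of from the cookie. */
static void fake_trampoline(uint64_t cookie)
{
	uint64_t thread = *(uint64_t *)(uintptr_t)cookie;

	printf("new cpu sees thread pointer %#llx\n",
	       (unsigned long long)thread);
}

int main(void)
{
	new_thread_slot = 0x123456789abcdef0ull;	/* would not survive truncation */
	fake_obp_startcpu(fake_trampoline, (uint64_t)(uintptr_t)&new_thread_slot);
	return 0;
}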
396 static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu) in spitfire_xcall_helper() argument
403 cpu = (((cpu & 0x3c) << 1) | in spitfire_xcall_helper()
404 ((cpu & 0x40) >> 4) | in spitfire_xcall_helper()
405 (cpu & 0x3)); in spitfire_xcall_helper()
408 target = (cpu << 14) | 0x70; in spitfire_xcall_helper()
415 * ADDR 0x20) for the dummy read. -DaveM in spitfire_xcall_helper()
447 stuck -= 1; in spitfire_xcall_helper()
454 printk("CPU[%d]: mondo stuckage result[%016llx]\n", in spitfire_xcall_helper()
470 cpu_list = __va(tb->cpu_list_pa); in spitfire_xcall_deliver()
471 mondo = __va(tb->cpu_mondo_block_pa); in spitfire_xcall_deliver()
479 /* Cheetah now allows sending the whole 64 bytes of data in the interrupt
489 cpu_list = __va(tb->cpu_list_pa); in cheetah_xcall_deliver()
490 mondo = __va(tb->cpu_mondo_block_pa); in cheetah_xcall_deliver()
493 * busy/nack fields hard-coded by ITID number for this Ultra-III in cheetah_xcall_deliver()
523 u64 target, nr; in cheetah_xcall_deliver() local
525 nr = cpu_list[i]; in cheetah_xcall_deliver()
526 if (nr == 0xffff) in cheetah_xcall_deliver()
529 target = (nr << 14) | 0x70; in cheetah_xcall_deliver()
531 busy_mask |= (0x1UL << (nr * 2)); in cheetah_xcall_deliver()
578 if (!--stuck) in cheetah_xcall_deliver()
587 * of freezing up on this cpu. in cheetah_xcall_deliver()
589 printk("CPU[%d]: mondo stuckage result[%016llx]\n", in cheetah_xcall_deliver()
603 u64 check_mask, nr; in cheetah_xcall_deliver() local
605 nr = cpu_list[i]; in cheetah_xcall_deliver()
606 if (nr == 0xffff) in cheetah_xcall_deliver()
610 check_mask = (0x2UL << (2*nr)); in cheetah_xcall_deliver()
631 /* Multi-cpu list version.
634 * Sometimes not all cpus receive the mondo, requiring us to re-send
637 * Occasionally a target cpu strand is borrowed briefly by hypervisor to
639 * service time, 1 second overall wait is reasonable for 1 cpu.
640  * Here two in-between mondo check wait times are defined: 2 usec for
641  * single-cpu quick turnaround and up to 100 usec for large cpu counts.
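The comment block above summarizes the retry policy hypervisor_xcall_deliver() implements: keep re-sending until every target has taken the mondo, re-pack the stragglers to the front of cpu_list, wait about 2 usec between attempts when a single cpu is lagging and up to about 100 usec when many are, and give up after roughly a second of accumulated waiting per stalled cpu. A self-contained sketch of that policy, where deliver_once() is an invented stand-in for the sun4v hypercall and, like the real interface, marks completed list entries with 0xffff:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define WAIT_SINGLE_US	2U		/* quick re-check, one laggard    */
#define WAIT_MANY_US	100U		/* coarser re-check, many targets */
#define PER_CPU_WAIT_US	1000000U	/* ~1s of total waiting per cpu   */

/* Invented delivery stub: each call "delivers" to some targets and
 * marks them with 0xffff; returns 0 once every entry is delivered. */
static int deliver_once(uint16_t *targets, int cnt)
{
	int i, pending = 0;

	for (i = 0; i < cnt; i++) {
		if (targets[i] == 0xffff)
			continue;
		if (rand() % 2)
			targets[i] = 0xffff;	/* delivered */
		else
			pending++;
	}
	return pending ? -1 : 0;
}

static void delay_us(unsigned int us)
{
	(void)us;				/* no-op in this sketch */
}

/* Retry until every target has taken the mondo or the per-cpu time
 * budget is exhausted; stragglers are re-packed to the front. */
static bool deliver_with_retries(uint16_t *targets, int cnt)
{
	unsigned int waited = 0;

	while (cnt > 0) {
		int i, rem = 0;

		if (deliver_once(targets, cnt) == 0)
			return true;

		for (i = 0; i < cnt; i++)
			if (targets[i] != 0xffff)
				targets[rem++] = targets[i];
		cnt = rem;
		if (cnt == 0)
			return true;

		if (waited >= (unsigned int)cnt * PER_CPU_WAIT_US)
			return false;		/* truly stuck, report it */

		delay_us(cnt == 1 ? WAIT_SINGLE_US : WAIT_MANY_US);
		waited += cnt == 1 ? WAIT_SINGLE_US : WAIT_MANY_US;
	}
	return true;
}

int main(void)
{
	uint16_t targets[] = { 1, 2, 3, 5, 8 };

	printf("delivered to all: %s\n",
	       deliver_with_retries(targets, 5) ? "yes" : "no");
	return 0;
}

The real function additionally distinguishes HV_ECPUERROR and HV_ENOCPU targets, as the fragments below show, so a faulty or offlined cpu does not stall the whole delivery.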
655 u16 cpu; in hypervisor_xcall_deliver() local
658 cpu_list = __va(tb->cpu_list_pa); in hypervisor_xcall_deliver()
670 tb->cpu_list_pa, in hypervisor_xcall_deliver()
671 tb->cpu_mondo_block_pa); in hypervisor_xcall_deliver()
677 		/* If the error is not one of these non-fatal ones, panic */	in hypervisor_xcall_deliver()
687 		 * Re-pack cpu_list with the cpus that remain to be retried at the	in hypervisor_xcall_deliver()
688 * front - this simplifies tracking the truly stalled cpus. in hypervisor_xcall_deliver()
691 * cpu list entries to the value 0xffff. in hypervisor_xcall_deliver()
696 * ECPUERROR means at least one target cpu is in error state, in hypervisor_xcall_deliver()
697 * it's usually safe to skip the faulty cpu and retry. in hypervisor_xcall_deliver()
699 		 * ENOCPU means one of the target cpus doesn't belong to the	in hypervisor_xcall_deliver()
701 * fatal and it's okay to skip the offlined cpu. in hypervisor_xcall_deliver()
706 cpu = cpu_list[i]; in hypervisor_xcall_deliver()
707 if (likely(cpu == 0xffff)) { in hypervisor_xcall_deliver()
710 (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) { in hypervisor_xcall_deliver()
711 ecpuerror_id = cpu + 1; in hypervisor_xcall_deliver()
712 } else if (status == HV_ENOCPU && !cpu_online(cpu)) { in hypervisor_xcall_deliver()
713 enocpu_id = cpu + 1; in hypervisor_xcall_deliver()
715 cpu_list[rem++] = cpu; in hypervisor_xcall_deliver()
719 /* No cpu remained, we're done. */ in hypervisor_xcall_deliver()
723 /* Otherwise, update the cpu count for retry. */ in hypervisor_xcall_deliver()
738 /* or, was any target cpu busy processing other mondos? */ in hypervisor_xcall_deliver()
753 * their cpu mondo queue work. in hypervisor_xcall_deliver()
763 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n", in hypervisor_xcall_deliver()
764 this_cpu, ecpuerror_id - 1); in hypervisor_xcall_deliver()
766 pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n", in hypervisor_xcall_deliver()
767 this_cpu, enocpu_id - 1); in hypervisor_xcall_deliver()
773 pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n", in hypervisor_xcall_deliver()
774 this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa); in hypervisor_xcall_deliver()
778 /* some cpus being non-responsive to the cpu mondo */ in hypervisor_xcall_deliver()
779 …pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total ta… in hypervisor_xcall_deliver()
796 * corrupt both our mondo block and cpu list state. in xcall_deliver()
809 mondo = __va(tb->cpu_mondo_block_pa); in xcall_deliver()
815 cpu_list = __va(tb->cpu_list_pa); in xcall_deliver()
817 /* Setup the initial cpu list. */ in xcall_deliver()
850 static void smp_start_sync_tick_client(int cpu) in smp_start_sync_tick_client() argument
853 cpumask_of(cpu)); in smp_start_sync_tick_client()
865 void arch_send_call_function_single_ipi(int cpu) in arch_send_call_function_single_ipi() argument
868 cpumask_of(cpu)); in arch_send_call_function_single_ipi()
892 /* It is not valid to test "current->active_mm == mm" here. in tsb_sync()
896 * current cpu's trap block PGD physical address. in tsb_sync()
898 if (tp->pgd_paddr == __pa(mm->pgd)) in tsb_sync()
926 unsigned int i, nr = folio_nr_pages(folio); in __local_flush_dcache_folio() local
929 for (i = 0; i < nr; i++) in __local_flush_dcache_folio()
937 for (i = 0; i < nr; i++) in __local_flush_dcache_folio()
943 void smp_flush_dcache_folio_impl(struct folio *folio, int cpu) in smp_flush_dcache_folio_impl() argument
956 if (cpu == this_cpu) { in smp_flush_dcache_folio_impl()
958 } else if (cpu_online(cpu)) { in smp_flush_dcache_folio_impl()
972 unsigned int i, nr = folio_nr_pages(folio); in smp_flush_dcache_folio_impl() local
974 for (i = 0; i < nr; i++) { in smp_flush_dcache_folio_impl()
976 (u64) pg_addr, cpumask_of(cpu)); in smp_flush_dcache_folio_impl()
1013 unsigned int i, nr = folio_nr_pages(folio); in flush_dcache_folio_all() local
1015 for (i = 0; i < nr; i++) { in flush_dcache_folio_all()
1055 * mm->cpu_vm_mask is a bit mask of which cpus an address
1060 /* This currently is only used by the hugetlb arch pre-fault
1061 * hook on UltraSPARC-III+ and later when changing the pagesize
1066 u32 ctx = CTX_HWBITS(mm->context); in smp_flush_tlb_mm()
1081 unsigned long nr; member
1089 __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); in tlb_pending_func()
1092 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) in smp_flush_tlb_pending() argument
1094 u32 ctx = CTX_HWBITS(mm->context); in smp_flush_tlb_pending()
1100 info.nr = nr; in smp_flush_tlb_pending()
1106 __flush_tlb_pending(ctx, nr, vaddrs); in smp_flush_tlb_pending()
1113 unsigned long context = CTX_HWBITS(mm->context); in smp_flush_tlb_page()
1138 /* CPU capture. */
1154 printk("CPU[%d]: Sending penguins to jail...", in smp_capture()
1172 printk("CPU[%d]: Giving pardon to " in smp_release()
1260 if (cpu_data(i).proc_id == -1) { in smp_fill_in_sib_core_maps()
1273 int __cpu_up(unsigned int cpu, struct task_struct *tidle) in __cpu_up() argument
1275 int ret = smp_boot_one_cpu(cpu, tidle); in __cpu_up()
1278 cpumask_set_cpu(cpu, &smp_commenced_mask); in __cpu_up()
1279 while (!cpu_online(cpu)) in __cpu_up()
1281 if (!cpu_online(cpu)) { in __cpu_up()
1282 ret = -ENODEV; in __cpu_up()
1288 smp_synchronize_one_tick(cpu); in __cpu_up()
1297 int cpu = smp_processor_id(); in cpu_play_dead() local
1303 struct trap_per_cpu *tb = &trap_block[cpu]; in cpu_play_dead()
1306 tb->cpu_mondo_pa, 0); in cpu_play_dead()
1308 tb->dev_mondo_pa, 0); in cpu_play_dead()
1310 tb->resum_mondo_pa, 0); in cpu_play_dead()
1312 tb->nonresum_mondo_pa, 0); in cpu_play_dead()
1315 cpumask_clear_cpu(cpu, &smp_commenced_mask); in cpu_play_dead()
1332 int cpu = smp_processor_id(); in __cpu_disable() local
1336 for_each_cpu(i, &cpu_core_map[cpu]) in __cpu_disable()
1337 cpumask_clear_cpu(cpu, &cpu_core_map[i]); in __cpu_disable()
1338 cpumask_clear(&cpu_core_map[cpu]); in __cpu_disable()
1340 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) in __cpu_disable()
1341 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); in __cpu_disable()
1342 cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); in __cpu_disable()
1344 c = &cpu_data(cpu); in __cpu_disable()
1346 c->core_id = 0; in __cpu_disable()
1347 c->proc_id = -1; in __cpu_disable()
1351 /* Make sure no interrupts point to this cpu. */ in __cpu_disable()
1358 set_cpu_online(cpu, false); in __cpu_disable()
1365 void __cpu_die(unsigned int cpu) in __cpu_die() argument
1371 if (!cpumask_test_cpu(cpu, &smp_commenced_mask)) in __cpu_die()
1375 if (cpumask_test_cpu(cpu, &smp_commenced_mask)) { in __cpu_die()
1376 printk(KERN_ERR "CPU %u didn't die...\n", cpu); in __cpu_die()
1383 hv_err = sun4v_cpu_stop(cpu); in __cpu_die()
1385 set_cpu_present(cpu, false); in __cpu_die()
1388 } while (--limit > 0); in __cpu_die()
1402 static void send_cpu_ipi(int cpu) in send_cpu_ipi() argument
1405 0, 0, cpumask_of(cpu)); in send_cpu_ipi()
1420 static unsigned long send_cpu_poke(int cpu) in send_cpu_poke() argument
1424 per_cpu(poke, cpu) = true; in send_cpu_poke()
1425 hv_err = sun4v_cpu_poke(cpu); in send_cpu_poke()
1427 per_cpu(poke, cpu) = false; in send_cpu_poke()
1435 void arch_smp_send_reschedule(int cpu) in arch_smp_send_reschedule() argument
1437 if (cpu == smp_processor_id()) { in arch_smp_send_reschedule()
1443 /* Use cpu poke to resume idle cpu if supported. */ in arch_smp_send_reschedule()
1444 if (cpu_poke && idle_cpu(cpu)) { in arch_smp_send_reschedule()
1447 ret = send_cpu_poke(cpu); in arch_smp_send_reschedule()
1453 * - cpu poke not supported in arch_smp_send_reschedule()
1454 * - cpu not idle in arch_smp_send_reschedule()
1455 * - send_cpu_poke() returns with error in arch_smp_send_reschedule()
1457 send_cpu_ipi(cpu); in arch_smp_send_reschedule()
1476 /* CPU POKE is registered. */ in smp_init_cpu_poke()
1498 int cpu; in smp_send_stop() local
1505 for_each_online_cpu(cpu) { in smp_send_stop()
1506 if (cpu == this_cpu) in smp_send_stop()
1509 set_cpu_online(cpu, false); in smp_send_stop()
1513 hv_err = sun4v_cpu_stop(cpu); in smp_send_stop()
1519 prom_stopcpu_cpuid(cpu); in smp_send_stop()
1533 static int __init pcpu_cpu_to_node(int cpu) in pcpu_cpu_to_node() argument
1535 return cpu_to_node(cpu); in pcpu_cpu_to_node()
1541 unsigned int cpu; in setup_per_cpu_areas() local
1542 int rc = -EINVAL; in setup_per_cpu_areas()
1560 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; in setup_per_cpu_areas()
1561 for_each_possible_cpu(cpu) in setup_per_cpu_areas()
1562 __per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; in setup_per_cpu_areas()
1564 /* Setup %g5 for the boot cpu. */ in setup_per_cpu_areas()