Lines matching +full:tcs +full:- +full:wait in arch/mips/kernel/smp.c

1 // SPDX-License-Identifier: GPL-2.0-or-later
33 #include <asm/r4k-timer.h>
34 #include <asm/mips-cps.h>
46 /* Number of TCs (or siblings in Intel speak) per CPU core */
50 /* representing the TCs (or siblings in Intel speak) of each logical CPU */
54 /* representing the core map of multi-core chips of each logical CPU */
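
The three comments above (lines 46, 50, 54) annotate per-CPU topology state. A minimal sketch of the kind of declarations such comments usually sit next to; the variable names smp_num_siblings, cpu_sibling_map and cpu_core_map follow common kernel convention and are assumptions, not taken from the matched lines:

#include <linux/cache.h>
#include <linux/cpumask.h>

/* Number of TCs (hardware threads) per core; assume 1 until topology is probed. */
int smp_num_siblings = 1;

/* For each logical CPU, the set of TCs sharing its core. */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;

/* For each logical CPU, the set of CPUs sharing its multi-core chip. */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
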
139 /* Re-calculate the mask */ in calculate_cpu_foreign_map()
360 mp_ops->init_secondary(); in start_secondary()
395 * irq will be enabled in ->smp_finish(), enabling it too early in start_secondary()
399 mp_ops->smp_finish(); in start_secondary()
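
The start_secondary() matches (lines 360-399) bracket the ordering constraint spelled out by the comment at line 395: local interrupts must stay off until the platform's ->smp_finish() hook runs. A condensed, hypothetical shape of that path, assuming the file-scope mp_ops pointer seen in the other matches; the elided middle steps are not taken from the matched lines:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

void start_secondary_sketch(void)
{
	/* Platform-specific per-CPU init; local interrupts are still disabled. */
	mp_ops->init_secondary();

	/* ... clockevent setup, counter sync, marking this CPU online ... */

	/*
	 * irq will be enabled in ->smp_finish(); enabling it earlier could
	 * deliver timer or IPI interrupts before this CPU is ready for them.
	 */
	mp_ops->smp_finish();

	/* Hand over to the common idle loop. */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
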
429 current_thread_info()->cpu = 0; in smp_prepare_cpus()
430 mp_ops->prepare_cpus(max_cpus); in smp_prepare_cpus()
443 if (mp_ops->prepare_boot_cpu) in smp_prepare_boot_cpu()
444 mp_ops->prepare_boot_cpu(); in smp_prepare_boot_cpu()
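
smp_prepare_cpus() and smp_prepare_boot_cpu() (lines 429-444), like start_secondary() and __cpu_up(), dispatch through a table of platform SMP hooks. A sketch of the hook set implied by the matched call sites; on MIPS the real table is struct plat_smp_ops (asm/smp-ops.h), and the stand-in name and exact signatures here are assumptions:

#include <linux/sched.h>

struct mips_smp_hooks {				/* illustrative stand-in for struct plat_smp_ops */
	void (*prepare_cpus)(unsigned int max_cpus);	/* boot CPU, before bringing others up */
	void (*prepare_boot_cpu)(void);			/* optional */
	int  (*boot_secondary)(int cpu, struct task_struct *idle);
	void (*init_secondary)(void);			/* runs on the new CPU, IRQs still off */
	void (*smp_finish)(void);			/* runs on the new CPU, enables IRQs */
	void (*cleanup_dead_cpu)(unsigned int cpu);	/* optional, CPU hotplug teardown */
};
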
453 err = mp_ops->boot_secondary(cpu, tidle); in __cpu_up()
457 /* Wait for CPU to start and be ready to sync counters */ in __cpu_up()
461 return -EIO; in __cpu_up()
466 /* Wait for CPU to finish startup & mark itself online before return */ in __cpu_up()
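
__cpu_up() (lines 453-466) is the boot-CPU side of the handshake: kick the secondary through ->boot_secondary(), then block until it reports in twice, once when it enters the kernel and once when it has marked itself online. A minimal sketch of that wait pattern using completions; the completion names, the 1-second timeout and the function name are assumptions, only the two waits and the -EIO failure path come from the matched lines:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static DECLARE_COMPLETION(cpu_starting);	/* secondary has entered the kernel */
static DECLARE_COMPLETION(cpu_running);		/* secondary has marked itself online */

int __cpu_up_sketch(unsigned int cpu, struct task_struct *tidle)
{
	/* mp_ops is the file-scope hook table pointer seen in the other matches. */
	int err = mp_ops->boot_secondary(cpu, tidle);

	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000)))
		return -EIO;

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);

	return 0;
}
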
530 * multithreaded address spaces, inter-CPU interrupts have to be sent.
531 * Another case where inter-CPU interrupts are required is when the target
542 if (atomic_read(&mm->mm_users) == 0) in flush_tlb_mm()
549 * No need to worry about other CPUs - the ginvt in in flush_tlb_mm()
552 } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_mm()
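
The flush_tlb_mm() matches (lines 542-552) encode the shootdown decision: an mm with no remaining users needs nothing; a core with MMIDs can rely on a globalized invalidate (the ginvt mentioned at line 549); an mm that is shared, or not the caller's, needs inter-CPU interrupts; and a private mm only needs the other CPUs' cached contexts marked stale. A hedged sketch of that shape; the helper names, the drop_mmu_context() placement and the use of smp_call_function() are assumptions rather than quotes from the matched lines:

#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

/* Hypothetical remote handler mirroring the *_ipi helpers in the matches. */
static void flush_tlb_mm_ipi_sketch(void *info)
{
	local_flush_tlb_mm((struct mm_struct *)info);
}

void flush_tlb_mm_sketch(struct mm_struct *mm)
{
	if (atomic_read(&mm->mm_users) == 0)
		return;			/* exit_mmap() already tore everything down */

	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt issued when
		 * the context is dropped below is globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		/* mm is shared or not ours: interrupt every other CPU using it. */
		smp_call_function(flush_tlb_mm_ipi_sketch, mm, 1);
	} else {
		/* Private mm: just mark the other CPUs' contexts stale. */
		unsigned int cpu;

		for_each_online_cpu(cpu)
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
	}
	drop_mmu_context(mm);

	preempt_enable();
}
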
577 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); in flush_tlb_range_ipi()
582 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
602 } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { in flush_tlb_range()
613 int exec = vma->vm_flags & VM_EXEC; in flush_tlb_range()
634 local_flush_tlb_kernel_range(fd->addr1, fd->addr2); in flush_tlb_kernel_range_ipi()
651 local_flush_tlb_page(fd->vma, fd->addr1); in flush_tlb_page_ipi()
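
flush_tlb_range_ipi(), flush_tlb_kernel_range_ipi() and flush_tlb_page_ipi() (lines 577, 634, 651) all unpack the same argument block on the remote CPU, and flush_tlb_range()/flush_tlb_page() fill it in on the sending side. A sketch of that carrier struct and the simplest dispatch path, the kernel-range one; the field names follow the fd->vma / fd->addr1 / fd->addr2 accesses in the matched lines, while on_each_cpu() as the broadcast primitive is an assumption:

#include <linux/mm_types.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>

/* Argument block handed to the remote CPUs. */
struct flush_tlb_data_sketch {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_kernel_range_ipi_sketch(void *info)
{
	struct flush_tlb_data_sketch *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range_sketch(unsigned long start, unsigned long end)
{
	struct flush_tlb_data_sketch fd = {
		.addr1 = start,
		.addr2 = end,
	};

	/* Kernel mappings are visible to everyone: run the flush on all CPUs. */
	on_each_cpu(flush_tlb_kernel_range_ipi_sketch, &fd, 1);
}
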
662 write_c0_memorymapid(cpu_asid(0, vma->vm_mm)); in flush_tlb_page()
669 } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
670 (current->mm != vma->vm_mm)) { in flush_tlb_page()
688 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) in flush_tlb_page()
689 set_cpu_context(cpu, vma->vm_mm, 1); in flush_tlb_page()
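
flush_tlb_page() (lines 662-689) mirrors the three-way split of flush_tlb_mm(): MMID-capable cores write the target MMID (line 662) and invalidate globally, a shared mm goes through an IPI, and for a private mm the other CPUs merely get their cached context invalidated (lines 688-689). A sketch of just that private-mm branch; the wrapper name and the preempt bracketing are assumptions:

#include <linux/mm_types.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

void flush_tlb_page_private_mm_sketch(struct vm_area_struct *vma,
				      unsigned long page)
{
	unsigned int cpu;

	preempt_disable();

	for_each_online_cpu(cpu) {
		/*
		 * Write 1 (a stale, never-current ASID generation) into the
		 * remote context: that CPU will allocate a fresh ASID the
		 * next time it runs this mm, while the nonzero value keeps
		 * the mm recorded as having run there.
		 */
		if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
			set_cpu_context(cpu, vma->vm_mm, 1);
	}

	local_flush_tlb_page(vma, page);

	preempt_enable();
}
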
714 if (mp_ops->cleanup_dead_cpu) in arch_cpuhp_cleanup_dead_cpu()
715 mp_ops->cleanup_dead_cpu(cpu); in arch_cpuhp_cleanup_dead_cpu()
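
arch_cpuhp_cleanup_dead_cpu() (lines 714-715) shows the usual optional-hook pattern: only platforms that must reclaim resources after a CPU dies provide ->cleanup_dead_cpu, everyone else leaves the pointer NULL so the call is skipped. A sketch of the whole function under that assumption:

void arch_cpuhp_cleanup_dead_cpu_sketch(unsigned int cpu)
{
	/* Optional platform hook; a NULL pointer simply means nothing to do. */
	if (mp_ops->cleanup_dead_cpu)
		mp_ops->cleanup_dead_cpu(cpu);
}
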