Searched refs:this_cpu_read (Results 1 – 25 of 109) sorted by relevance

/openbmc/linux/arch/x86/mm/
tlb.c
205 if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid)) in clear_asid_other()
230 if (this_cpu_read(cpu_tlbstate.invalidate_other)) in choose_new_asid()
234 if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) != in choose_new_asid()
318 WARN_ON(!this_cpu_read(cpu_tlbstate_shared.is_lazy)); in leave_mm()
362 if (this_cpu_read(cpu_info.smt_active)) { in l1d_flush_evaluate()
391 prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_spec); in cond_mitigation()
488 cr4_update_pce_mm(this_cpu_read(cpu_tlbstate.loaded_mm)); in cr4_update_pce()
680 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) in enter_lazy_tlb()
780 if (this_cpu_read(cpu_tlbstate_shared.is_lazy)) { in flush_tlb_func()
1096 build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd, in __get_current_cr3_fast()
[all …]
/openbmc/linux/kernel/trace/
trace_preemptirq.c
44 if (this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_on_prepare()
55 if (this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_on()
75 if (!this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_off_finish()
89 if (!this_cpu_read(tracing_irq_cpu)) { in trace_hardirqs_off()
/openbmc/linux/drivers/irqchip/
irq-csky-mpintc.c
56 (this_cpu_read(intcl_reg) + INTCL_CFGR) : (INTCG_base + INTCG_CICFGR)))
75 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_handler()
83 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_unmask()
92 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_mask()
99 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_eoi()
215 void __iomem *reg_base = this_cpu_read(intcl_reg); in csky_mpintc_send_ipi()
/openbmc/linux/arch/x86/include/asm/xen/
hypervisor.h
84 enum xen_lazy_mode old_mode = this_cpu_read(xen_lazy_mode); in enter_lazy()
98 BUG_ON(this_cpu_read(xen_lazy_mode) != mode); in leave_lazy()
100 if (this_cpu_read(xen_lazy_nesting) == 0) in leave_lazy()
/openbmc/linux/arch/arm64/kernel/
topology.c
155 prev_const_cnt = this_cpu_read(arch_const_cycles_prev); in amu_scale_freq_tick()
156 prev_core_cnt = this_cpu_read(arch_core_cycles_prev); in amu_scale_freq_tick()
160 const_cnt = this_cpu_read(arch_const_cycles_prev); in amu_scale_freq_tick()
161 core_cnt = this_cpu_read(arch_core_cycles_prev); in amu_scale_freq_tick()
176 scale *= this_cpu_read(arch_max_freq_scale); in amu_scale_freq_tick()
/openbmc/linux/arch/x86/include/asm/
kvmclock.h
13 return &this_cpu_read(hv_clock_per_cpu)->pvti; in this_cpu_pvti()
18 return this_cpu_read(hv_clock_per_cpu); in this_cpu_hvclock()
switch_to.h
58 if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs)) in refresh_sysenter_cs()
/openbmc/linux/arch/x86/kernel/
dumpstack_32.c
40 unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.hardirq_stack_ptr); in in_hardirq_stack()
65 unsigned long *begin = (unsigned long *)this_cpu_read(pcpu_hot.softirq_stack_ptr); in in_softirq_stack()
102 info->next_sp = (unsigned long *)this_cpu_read(cpu_tss_rw.x86_tss.sp); in in_doublefault_stack()
nmi.c
504 if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { in DEFINE_IDTENTRY_RAW()
546 local_db_restore(this_cpu_read(nmi_dr7)); in DEFINE_IDTENTRY_RAW()
550 if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) in DEFINE_IDTENTRY_RAW()
551 write_cr2(this_cpu_read(nmi_cr2)); in DEFINE_IDTENTRY_RAW()
/openbmc/linux/kernel/
watchdog_perf.c
155 perf_event_enable(this_cpu_read(watchdog_ev)); in watchdog_hardlockup_enable()
165 struct perf_event *event = this_cpu_read(watchdog_ev); in watchdog_hardlockup_disable()
262 perf_event_release_kernel(this_cpu_read(watchdog_ev)); in watchdog_hardlockup_probe()
/openbmc/linux/arch/loongarch/kernel/
kfpu.c
27 WARN_ON(this_cpu_read(in_kernel_fpu)); in kernel_fpu_begin()
55 WARN_ON(!this_cpu_read(in_kernel_fpu)); in kernel_fpu_end()
/openbmc/linux/include/linux/
lockdep.h
611 #define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
615 WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
620 WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
625 WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
630 WARN_ON_ONCE(__lockdep_enabled && (this_cpu_read(hardirq_context) || \
631 !this_cpu_read(hardirqs_enabled))); \
639 !this_cpu_read(hardirqs_enabled))); \
647 this_cpu_read(hardirqs_enabled))); \
/openbmc/linux/arch/x86/kernel/cpu/mce/
amd.c
428 if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off) in threshold_restart_bank()
687 for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { in mce_amd_feature_init()
832 for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) in amd_deferred_error_interrupt()
871 struct threshold_bank **bp = this_cpu_read(threshold_banks); in amd_threshold_interrupt()
882 for (bank = 0; bank < this_cpu_read(mce_num_banks); ++bank) { in amd_threshold_interrupt()
1174 struct device *dev = this_cpu_read(mce_device); in threshold_create_bank()
1302 unsigned int bank, numbanks = this_cpu_read(mce_num_banks); in __threshold_remove_device()
1316 struct threshold_bank **bp = this_cpu_read(threshold_banks); in mce_threshold_remove_device()
1351 bp = this_cpu_read(threshold_banks); in mce_threshold_create_device()
1355 numbanks = this_cpu_read(mce_num_banks); in mce_threshold_create_device()
[all …]
core.c
688 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in machine_check_poll()
891 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in mce_no_way_out()
1206 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in mce_clear_state()
1256 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in __mc_scan_banks()
1737 u8 n_banks = this_cpu_read(mce_num_banks); in __mcheck_cpu_mce_banks_init()
1812 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in __mcheck_cpu_init_clear_banks()
1838 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in __mcheck_cpu_check_banks()
2018 if (this_cpu_read(mce_num_banks) > 8) in mce_zhaoxin_feature_init()
2244 if (bank >= this_cpu_read(mce_num_banks)) { in mce_disable_bank()
2333 for (i = 0; i < this_cpu_read(mce_num_banks); i++) { in mce_disable_error_reporting()
[all …]
/openbmc/linux/Documentation/translations/zh_CN/core-api/
this_cpu_ops.rst
46 this_cpu_read(pcp)
75 z = this_cpu_read(x);
232 and combines more easily with ``()``. The second form is also consistent with the way ``this_cpu_read()`` is commonly used.
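
The this_cpu_ops.rst hit above is the documentation for the primitive these results exercise. As a rough illustration only (a minimal sketch with made-up variable names, not code from any file listed here), this_cpu_read() is normally paired with a DEFINE_PER_CPU variable and its sibling update helpers:

	#include <linux/percpu.h>

	/* Illustrative per-cpu counter; the name is hypothetical. */
	static DEFINE_PER_CPU(int, demo_counter);

	static void demo_bump(void)
	{
		/* Increment this CPU's copy without an explicit preempt_disable(). */
		this_cpu_inc(demo_counter);
	}

	static int demo_read(void)
	{
		/* Single preemption-safe read of this CPU's copy. */
		return this_cpu_read(demo_counter);
	}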
/openbmc/linux/arch/arm64/include/asm/
simd.h
41 !this_cpu_read(fpsimd_context_busy); in may_use_simd()
/openbmc/linux/kernel/printk/
printk_safe.c
41 if (this_cpu_read(printk_context) || in_nmi()) in vprintk()
/openbmc/linux/tools/testing/radix-tree/linux/
percpu.h
7 #define this_cpu_read(var) var macro
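
The tools/testing/radix-tree shim above is the userspace stand-in: in a single-threaded test harness there is effectively one CPU, so the per-cpu read collapses to a plain variable access and kernel code using this_cpu_read() compiles unchanged. A hedged sketch of the idea (the counter name below is illustrative, not taken from the harness):

	/* Userspace sketch: one thread == one "CPU", so no special accessor is needed. */
	#define this_cpu_read(var)	(var)

	static int demo_hits;	/* would be DEFINE_PER_CPU(int, demo_hits) in the kernel */

	static int demo_read_hits(void)
	{
		return this_cpu_read(demo_hits);
	}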
/openbmc/linux/arch/x86/xen/
multicalls.h
51 local_irq_restore(this_cpu_read(xen_mc_irq_flags)); in xen_mc_issue()
suspend.c
44 wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); in xen_vcpu_notify_restore()
/openbmc/linux/include/crypto/internal/
simd.h
64 (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
/openbmc/linux/arch/x86/kernel/fpu/
context.h
38 return fpu == this_cpu_read(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu; in fpregs_state_valid()
/openbmc/linux/arch/powerpc/lib/
code-patching.c
132 free_vm_area(this_cpu_read(cpu_patching_context.area)); in text_area_cpu_down()
192 put_patching_mm(this_cpu_read(cpu_patching_context.mm), in text_area_cpu_down_mm()
193 this_cpu_read(cpu_patching_context.addr)); in text_area_cpu_down_mm()
/openbmc/linux/drivers/accessibility/speakup/
fakekey.c
86 return this_cpu_read(reporting_keystroke); in speakup_fake_key_pressed()
/openbmc/linux/net/ipv4/netfilter/
nf_dup_ipv4.c
55 if (this_cpu_read(nf_skb_duplicated)) in nf_dup_ipv4()
