lockdep.c (c1cc4784ce6e8cceff1013709abd74bcbf7fbf24) → lockdep.c (0584df9c12f449124d0bfef9899e5365604ee7a9)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kernel/lockdep.c
4 *
5 * Runtime locking correctness validator
6 *
7 * Started by Ingo Molnar:
8 *

--- 381 unchanged lines hidden (view full) ---

390{
391 task->lockdep_depth = 0; /* no locks held yet */
392 task->curr_chain_key = INITIAL_CHAIN_KEY;
393 task->lockdep_recursion = 0;
394}
395
396static __always_inline void lockdep_recursion_finish(void)
397{
398 if (WARN_ON_ONCE(--current->lockdep_recursion))
398 if (WARN_ON_ONCE((--current->lockdep_recursion) & LOCKDEP_RECURSION_MASK))
399 current->lockdep_recursion = 0;
400}
401
402void lockdep_set_selftest_task(struct task_struct *task)
403{
404 lockdep_selftest_task_struct = task;
405}
406
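The new lockdep_recursion_finish() no longer warns on any non-zero value but masks the counter first, so only the low bits of current->lockdep_recursion count recursion and anything kept in the bits above the mask is ignored by the check. A minimal sketch of the split this implies, with assumed constants (the real definitions are not part of this hunk):

/* Illustrative sketch only -- constant values assumed, not taken from either revision. */
#define LOCKDEP_RECURSION_BITS	16
#define LOCKDEP_OFF		(1U << LOCKDEP_RECURSION_BITS)
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)

/* hypothetical helper: true only while lockdep is re-entering itself */
static inline bool lockdep_recursing(struct task_struct *task)
{
	return task->lockdep_recursion & LOCKDEP_RECURSION_MASK;
}

The later hunks below use exactly this masked test (current->lockdep_recursion & LOCKDEP_RECURSION_MASK) for their recursion checks.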

--- 1650 unchanged lines hidden (view full) ---

2057 pr_warn("\n");
2058 pr_warn("=====================================================\n");
2059 pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
2060 irqclass, irqclass);
2061 print_kernel_ident();
2062 pr_warn("-----------------------------------------------------\n");
2063 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
2064 curr->comm, task_pid_nr(curr),
2065 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
2065 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
2066 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
2067 curr->hardirqs_enabled,
2067 lockdep_hardirqs_enabled(),
2068 curr->softirqs_enabled);
2069 print_lock(next);
2070
2071 pr_warn("\nand this task is already holding:\n");
2072 print_lock(prev);
2073 pr_warn("which would create a new lock dependency:\n");
2074 print_lock_name(hlock_class(prev));
2075 pr_cont(" ->");

--- 1250 unchanged lines hidden (view full) ---

3326 print_kernel_ident();
3327 pr_warn("--------------------------------\n");
3328
3329 pr_warn("inconsistent {%s} -> {%s} usage.\n",
3330 usage_str[prev_bit], usage_str[new_bit]);
3331
3332 pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
3333 curr->comm, task_pid_nr(curr),
3334 lockdep_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
3334 lockdep_hardirq_context(), hardirq_count() >> HARDIRQ_SHIFT,
3335 lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
3336 lockdep_hardirqs_enabled(curr),
3336 lockdep_hardirqs_enabled(),
3337 lockdep_softirqs_enabled(curr));
3338 print_lock(this);
3339
3340 pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
3341 print_lock_trace(hlock_class(this)->usage_traces[prev_bit], 1);
3342
3343 print_irqtrace_events(curr);
3344 pr_warn("\nother info that might help us debug this:\n");

--- 134 unchanged lines hidden (view full) ---

3479
3480 print_irq_inversion_bug(curr, &root, target_entry,
3481 this, 0, irqclass);
3482 return 0;
3483}
3484
3485void print_irqtrace_events(struct task_struct *curr)
3486{
3487 printk("irq event stamp: %u\n", curr->irq_events);
3487 const struct irqtrace_events *trace = &curr->irqtrace;
3488
3489 printk("irq event stamp: %u\n", trace->irq_events);
3488 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
3490 printk("hardirqs last enabled at (%u): [<%px>] %pS\n",
3489 curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
3490 (void *)curr->hardirq_enable_ip);
3491 trace->hardirq_enable_event, (void *)trace->hardirq_enable_ip,
3492 (void *)trace->hardirq_enable_ip);
3491 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
3493 printk("hardirqs last disabled at (%u): [<%px>] %pS\n",
3492 curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
3493 (void *)curr->hardirq_disable_ip);
3494 trace->hardirq_disable_event, (void *)trace->hardirq_disable_ip,
3495 (void *)trace->hardirq_disable_ip);
3494 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
3496 printk("softirqs last enabled at (%u): [<%px>] %pS\n",
3495 curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
3496 (void *)curr->softirq_enable_ip);
3497 trace->softirq_enable_event, (void *)trace->softirq_enable_ip,
3498 (void *)trace->softirq_enable_ip);
3497 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
3499 printk("softirqs last disabled at (%u): [<%px>] %pS\n",
3498 curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
3499 (void *)curr->softirq_disable_ip);
3500 trace->softirq_disable_event, (void *)trace->softirq_disable_ip,
3501 (void *)trace->softirq_disable_ip);
3500}
3501
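Every stamp in the new print_irqtrace_events() is read through one irqtrace_events structure hanging off the task (curr->irqtrace) instead of individual task_struct fields. A hedged sketch of the layout those accesses imply; the real definition lives in a header outside this diff, and the field types are only inferred from the %u (event counters) and %px/%pS (instruction pointers) formats above:

struct irqtrace_events {
	unsigned int	irq_events;
	unsigned long	hardirq_enable_ip;
	unsigned long	hardirq_disable_ip;
	unsigned int	hardirq_enable_event;
	unsigned int	hardirq_disable_event;
	unsigned long	softirq_disable_ip;
	unsigned long	softirq_enable_ip;
	unsigned int	softirq_disable_event;
	unsigned int	softirq_enable_event;
};

The softirq_* fields are not touched in this hunk; they show up in the lockdep_softirqs_on()/lockdep_softirqs_off() hunks further down.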
3502static int HARDIRQ_verbose(struct lock_class *class)
3503{
3504#if HARDIRQ_VERBOSE
3505 return class_filter(class);
3506#endif
3507 return 0;

--- 133 unchanged lines hidden (view full) ---

3641 *
3642 * Invoked before a possible transition to RCU idle from exit to user or
3643 * guest mode. This ensures that all RCU operations are done before RCU
3644 * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
3645 * invoked to set the final state.
3646 */
3647void lockdep_hardirqs_on_prepare(unsigned long ip)
3648{
3649 if (unlikely(!debug_locks || current->lockdep_recursion))
3650 return;
3651
3652 if (unlikely(current->hardirqs_enabled)) {
3653 /*
3654 * Neither irq nor preemption are disabled here
3655 * so this is racy by nature but losing one hit
3656 * in a stat is not a big deal.
3657 */
3658 __debug_atomic_inc(redundant_hardirqs_on);
3659 return;
3660 }
3651 if (unlikely(!debug_locks))
3652 return;
3653
3654 /*
3655 * NMIs do not (and cannot) track lock dependencies, nothing to do.
3656 */
3657 if (unlikely(in_nmi()))
3658 return;
3659
3660 if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
3661 return;
3662
3663 if (unlikely(lockdep_hardirqs_enabled())) {
3664 /*
3665 * Neither irq nor preemption are disabled here
3666 * so this is racy by nature but losing one hit
3667 * in a stat is not a big deal.
3668 */
3669 __debug_atomic_inc(redundant_hardirqs_on);
3670 return;
3671 }

--- 11 unchanged lines hidden (view full) ---

3672 */
3673 if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
3674 return;
3675
3676 /*
3677 * Can't allow enabling interrupts while in an interrupt handler,
3678 * that's general bad form and such. Recursion, limited stack etc..
3679 */
3680 if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
3691 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
3681 return;
3682
3683 current->hardirq_chain_key = current->curr_chain_key;
3684
3685 current->lockdep_recursion++;
3686 __trace_hardirqs_on_caller();
3687 lockdep_recursion_finish();
3688}
3689EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
3690
3691void noinstr lockdep_hardirqs_on(unsigned long ip)
3692{
3693 struct task_struct *curr = current;
3694
3695 if (unlikely(!debug_locks || curr->lockdep_recursion))
3696 return;
3697
3698 if (curr->hardirqs_enabled) {
3699 /*
3700 * Neither irq nor preemption are disabled here
3701 * so this is racy by nature but losing one hit
3702 * in a stat is not a big deal.
3703 */
3704 __debug_atomic_inc(redundant_hardirqs_on);
3705 return;
3706 }
3707
3704 struct irqtrace_events *trace = &current->irqtrace;
3705
3706 if (unlikely(!debug_locks))
3707 return;
3708
3709 /*
3710 * NMIs can happen in the middle of local_irq_{en,dis}able() where the
3711 * tracking state and hardware state are out of sync.
3712 *
3713 * NMIs must save lockdep_hardirqs_enabled() to restore IRQ state from,
3714 * and not rely on hardware state like normal interrupts.
3715 */
3716 if (unlikely(in_nmi())) {
3717 /*
3718 * Skip:
3719 * - recursion check, because NMI can hit lockdep;
3720 * - hardware state check, because above;
3721 * - chain_key check, see lockdep_hardirqs_on_prepare().
3722 */
3723 goto skip_checks;
3724 }
3725
3726 if (unlikely(current->lockdep_recursion & LOCKDEP_RECURSION_MASK))
3727 return;
3728
3729 if (lockdep_hardirqs_enabled()) {
3730 /*
3731 * Neither irq nor preemption are disabled here
3732 * so this is racy by nature but losing one hit
3733 * in a stat is not a big deal.
3734 */
3735 __debug_atomic_inc(redundant_hardirqs_on);
3736 return;
3737 }
3738

--- 7 unchanged lines hidden (view full) ---

3715
3716 /*
3717 * Ensure the lock stack remained unchanged between
3718 * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
3719 */
3720 DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
3721 current->curr_chain_key);
3722
3723 /* we'll do an OFF -> ON transition: */
3724 curr->hardirqs_enabled = 1;
3725 curr->hardirq_enable_ip = ip;
3726 curr->hardirq_enable_event = ++curr->irq_events;
3754skip_checks:
3755 /* we'll do an OFF -> ON transition: */
3756 this_cpu_write(hardirqs_enabled, 1);
3757 trace->hardirq_enable_ip = ip;
3758 trace->hardirq_enable_event = ++trace->irq_events;
3727 debug_atomic_inc(hardirqs_on_events);
3728}
3729EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
3730
3731/*
3732 * Hardirqs were disabled:
3733 */
3734void noinstr lockdep_hardirqs_off(unsigned long ip)
3735{
3736 struct task_struct *curr = current;
3737
3738 if (unlikely(!debug_locks || curr->lockdep_recursion))
3739 return;
3740
3741 /*
3742 * So we're supposed to get called after you mask local IRQs, but for
3743 * some reason the hardware doesn't quite think you did a proper job.
3744 */
3745 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3746 return;
3747
3748 if (curr->hardirqs_enabled) {
3749 /*
3750 * We have done an ON -> OFF transition:
3751 */
3752 curr->hardirqs_enabled = 0;
3753 curr->hardirq_disable_ip = ip;
3754 curr->hardirq_disable_event = ++curr->irq_events;
3768 if (unlikely(!debug_locks))
3769 return;
3770
3771 /*
3772 * Matching lockdep_hardirqs_on(), allow NMIs in the middle of lockdep;
3773 * they will restore the software state. This ensures the software
3774 * state is consistent inside NMIs as well.
3775 */
3776 if (unlikely(!in_nmi() && (current->lockdep_recursion & LOCKDEP_RECURSION_MASK)))
3777 return;
3778
3779 /*
3780 * So we're supposed to get called after you mask local IRQs, but for
3781 * some reason the hardware doesn't quite think you did a proper job.
3782 */
3783 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3784 return;
3785
3786 if (lockdep_hardirqs_enabled()) {
3787 struct irqtrace_events *trace = &current->irqtrace;
3788
3789 /*
3790 * We have done an ON -> OFF transition:
3791 */
3792 this_cpu_write(hardirqs_enabled, 0);
3793 trace->hardirq_disable_ip = ip;
3794 trace->hardirq_disable_event = ++trace->irq_events;
3755 debug_atomic_inc(hardirqs_off_events);
3756 } else {
3757 debug_atomic_inc(redundant_hardirqs_off);
3758 }
3759}
3760EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
3761
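Both versions of lockdep_hardirqs_on()/lockdep_hardirqs_off() above stop toggling the per-task curr->hardirqs_enabled flag; the writers become this_cpu_write(hardirqs_enabled, 1) and this_cpu_write(hardirqs_enabled, 0), and the readers go through lockdep_hardirqs_enabled(). A minimal sketch of what the matching reader could look like, assuming the flag is a plain per-CPU int declared in a header outside this diff:

/* Sketch only -- the real declaration and macro are not part of this file. */
DECLARE_PER_CPU(int, hardirqs_enabled);

#define lockdep_hardirqs_enabled()	(this_cpu_read(hardirqs_enabled))

Reading the flag per-CPU fits the comment in lockdep_hardirqs_on() above: an NMI can save lockdep_hardirqs_enabled() and later restore the IRQ-tracking state from it rather than relying on hardware state.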
3762/*
3763 * Softirqs will be enabled:
3764 */
3765void lockdep_softirqs_on(unsigned long ip)
3766{
3767 struct task_struct *curr = current;
3807 struct irqtrace_events *trace = &current->irqtrace;
3768
3769 if (unlikely(!debug_locks || current->lockdep_recursion))
3770 return;
3771
3772 /*
3773 * We fancy IRQs being disabled here, see softirq.c, avoids
3774 * funny state and nesting things.
3775 */
3776 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3777 return;
3778
3779 if (curr->softirqs_enabled) {
3819 if (current->softirqs_enabled) {
3780 debug_atomic_inc(redundant_softirqs_on);
3781 return;
3782 }
3783
3784 current->lockdep_recursion++;
3785 /*
3786 * We'll do an OFF -> ON transition:
3787 */
3788 curr->softirqs_enabled = 1;
3789 curr->softirq_enable_ip = ip;
3790 curr->softirq_enable_event = ++curr->irq_events;
3828 current->softirqs_enabled = 1;
3829 trace->softirq_enable_ip = ip;
3830 trace->softirq_enable_event = ++trace->irq_events;
3791 debug_atomic_inc(softirqs_on_events);
3792 /*
3793 * We are going to turn softirqs on, so set the
3794 * usage bit for all held locks, if hardirqs are
3795 * enabled too:
3796 */
3797 if (curr->hardirqs_enabled)
3798 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
3837 if (lockdep_hardirqs_enabled())
3838 mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
3799 lockdep_recursion_finish();
3800}
3801
3802/*
3803 * Softirqs were disabled:
3804 */
3805void lockdep_softirqs_off(unsigned long ip)
3806{
3807 struct task_struct *curr = current;
3808
3809 if (unlikely(!debug_locks || current->lockdep_recursion))
3810 return;
3811
3812 /*
3813 * We fancy IRQs being disabled here, see softirq.c
3814 */
3815 if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3816 return;
3817
3818 if (curr->softirqs_enabled) {
3856 if (current->softirqs_enabled) {
3857 struct irqtrace_events *trace = &current->irqtrace;
3858
3819 /*
3820 * We have done an ON -> OFF transition:
3821 */
3822 curr->softirqs_enabled = 0;
3823 curr->softirq_disable_ip = ip;
3824 curr->softirq_disable_event = ++curr->irq_events;
3862 current->softirqs_enabled = 0;
3863 trace->softirq_disable_ip = ip;
3864 trace->softirq_disable_event = ++trace->irq_events;
3825 debug_atomic_inc(softirqs_off_events);
3826 /*
3827 * Whoops, we wanted softirqs off, so why aren't they?
3828 */
3829 DEBUG_LOCKS_WARN_ON(!softirq_count());
3830 } else
3831 debug_atomic_inc(redundant_softirqs_off);
3832}

--- 5 unchanged lines hidden (view full) ---

3838 goto lock_used;
3839
3840 /*
3841 * If non-trylock use in a hardirq or softirq context, then
3842 * mark the lock as used in these contexts:
3843 */
3844 if (!hlock->trylock) {
3845 if (hlock->read) {
3846 if (curr->hardirq_context)
3886 if (lockdep_hardirq_context())
3847 if (!mark_lock(curr, hlock,
3848 LOCK_USED_IN_HARDIRQ_READ))
3849 return 0;
3850 if (curr->softirq_context)
3851 if (!mark_lock(curr, hlock,
3852 LOCK_USED_IN_SOFTIRQ_READ))
3853 return 0;
3854 } else {
3855 if (curr->hardirq_context)
3895 if (lockdep_hardirq_context())
3856 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3857 return 0;
3858 if (curr->softirq_context)
3859 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3860 return 0;
3861 }
3862 }
3863 if (!hlock->hardirqs_off) {

--- 21 unchanged lines hidden (view full) ---

3885 if (!mark_lock(curr, hlock, LOCK_USED))
3886 return 0;
3887
3888 return 1;
3889}
3890
3891static inline unsigned int task_irq_context(struct task_struct *task)
3892{
3893 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
3933 return LOCK_CHAIN_HARDIRQ_CONTEXT * !!lockdep_hardirq_context() +
3894 LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
3895}
3896
3897static int separate_irq_context(struct task_struct *curr,
3898 struct held_lock *hlock)
3899{
3900 unsigned int depth = curr->lockdep_depth;
3901

--- 76 unchanged lines hidden (view full) ---

3978}
3979
3980static inline short task_wait_context(struct task_struct *curr)
3981{
3982 /*
3983 * Set appropriate wait type for the context; for IRQs we have to take
3984 * into account force_irqthread as that is implied by PREEMPT_RT.
3985 */
3986 if (curr->hardirq_context) {
4026 if (lockdep_hardirq_context()) {
3987 /*
3988 * Check if force_irqthreads will run us threaded.
3989 */
3990 if (curr->hardirq_threaded || curr->irq_config)
3991 return LD_WAIT_CONFIG;
3992
3993 return LD_WAIT_SPIN;
3994 } else if (curr->softirq_context) {

--- 826 unchanged lines hidden (view full) ---

4821 */
4822static void check_flags(unsigned long flags)
4823{
4824#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
4825 if (!debug_locks)
4826 return;
4827
4828 if (irqs_disabled_flags(flags)) {
4829 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
4869 if (DEBUG_LOCKS_WARN_ON(lockdep_hardirqs_enabled())) {
4830 printk("possible reason: unannotated irqs-off.\n");
4831 }
4832 } else {
4870 printk("possible reason: unannotated irqs-off.\n");
4871 }
4872 } else {
4833 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
4873 if (DEBUG_LOCKS_WARN_ON(!lockdep_hardirqs_enabled())) {
4834 printk("possible reason: unannotated irqs-on.\n");
4835 }
4836 }
4837
4838 /*
4839 * We dont accurately track softirq state in e.g.
4840 * hardirq contexts (such as on 4KSTACKS), so only
4841 * check if not in hardirq contexts:

--- 1004 unchanged lines hidden (view full) ---

5846 pr_warn("WARNING: suspicious RCU usage\n");
5847 print_kernel_ident();
5848 pr_warn("-----------------------------\n");
5849 pr_warn("%s:%d %s!\n", file, line, s);
5850 pr_warn("\nother info that might help us debug this:\n\n");
5851 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
5852 !rcu_lockdep_current_cpu_online()
5853 ? "RCU used illegally from offline CPU!\n"
4874 printk("possible reason: unannotated irqs-on.\n");
4875 }
4876 }
4877
4878 /*
4879 * We dont accurately track softirq state in e.g.
4880 * hardirq contexts (such as on 4KSTACKS), so only
4881 * check if not in hardirq contexts:

--- 1004 unchanged lines hidden (view full) ---

5886 pr_warn("WARNING: suspicious RCU usage\n");
5887 print_kernel_ident();
5888 pr_warn("-----------------------------\n");
5889 pr_warn("%s:%d %s!\n", file, line, s);
5890 pr_warn("\nother info that might help us debug this:\n\n");
5891 pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
5892 !rcu_lockdep_current_cpu_online()
5893 ? "RCU used illegally from offline CPU!\n"
5854 : "",
5894 : !rcu_is_watching()
5895 ? "RCU used illegally from idle CPU!\n"
5896 : "",
5855 rcu_scheduler_active, debug_locks);
5856
5857 /*
5858 * If a CPU is in the RCU-free window in idle (ie: in the section
5859 * between rcu_idle_enter() and rcu_idle_exit(), then RCU
5860 * considers that CPU to be in an "extended quiescent state",
5861 * which means that RCU will be completely ignoring that CPU.
5862 * Therefore, rcu_read_lock() and friends have absolutely no

--- 20 unchanged lines hidden ---