Lines matching refs: curr (kernel/locking/lockdep.c)

1939 struct task_struct *curr = current; in print_circular_bug_header() local
1950 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
2029 struct task_struct *curr = current; in print_circular_bug() local
2057 lockdep_print_held_locks(curr); in print_circular_bug()
2559 print_bad_irq_dependency(struct task_struct *curr, in print_bad_irq_dependency() argument
2580 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
2582 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
2584 curr->softirqs_enabled); in print_bad_irq_dependency()
2613 lockdep_print_held_locks(curr); in print_bad_irq_dependency()
2788 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, in check_irq_usage() argument
2865 print_bad_irq_dependency(curr, &this, &that, in check_irq_usage()
2876 static inline int check_irq_usage(struct task_struct *curr, in check_irq_usage() argument
2983 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, in print_deadlock_bug() argument
2997 curr->comm, task_pid_nr(curr)); in print_deadlock_bug()
3009 lockdep_print_held_locks(curr); in print_deadlock_bug()
3026 check_deadlock(struct task_struct *curr, struct held_lock *next) in check_deadlock() argument
3033 for (i = 0; i < curr->lockdep_depth; i++) { in check_deadlock()
3034 prev = curr->held_locks + i; in check_deadlock()
3062 print_deadlock_bug(curr, prev, next); in check_deadlock()
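
The check_deadlock() hits above show the AA-deadlock check: before a new lock is recorded, the task's held-lock stack is scanned for an earlier lock of the same class. A minimal standalone sketch of that loop; struct held and its bool read flag are hypothetical simplifications of struct held_lock (real lockdep also distinguishes recursive reads and nest_lock nesting):

    #include <stdbool.h>

    struct held { int class_id; bool read; };

    /* false = self-deadlock detected */
    static bool model_check_deadlock(const struct held *held_stack, int depth,
                                     const struct held *next)
    {
        for (int i = 0; i < depth; i++) {
            const struct held *prev = &held_stack[i];

            if (prev->class_id != next->class_id)
                continue;
            if (prev->read && next->read)   /* read-recursion is allowed */
                continue;
            return false;                   /* same class re-acquired */
        }
        return true;
    }
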
3091 check_prev_add(struct task_struct *curr, struct held_lock *prev, in check_prev_add() argument
3138 if (!check_irq_usage(curr, prev, next)) in check_prev_add()
3227 check_prevs_add(struct task_struct *curr, struct held_lock *next) in check_prevs_add() argument
3230 int depth = curr->lockdep_depth; in check_prevs_add()
3244 if (curr->held_locks[depth].irq_context != in check_prevs_add()
3245 curr->held_locks[depth-1].irq_context) in check_prevs_add()
3249 u16 distance = curr->lockdep_depth - depth + 1; in check_prevs_add()
3250 hlock = curr->held_locks + depth - 1; in check_prevs_add()
3253 int ret = check_prev_add(curr, hlock, next, distance, &trace); in check_prevs_add()
3276 if (curr->held_locks[depth].irq_context != in check_prevs_add()
3277 curr->held_locks[depth-1].irq_context) in check_prevs_add()
3337 #define for_each_chain_block(bucket, prev, curr) \ argument
3338 for ((prev) = -1, (curr) = chain_block_buckets[bucket]; \
3339 (curr) >= 0; \
3340 (prev) = (curr), (curr) = chain_block_next(curr))
3384 int prev, curr; in add_chain_block() local
3406 for_each_chain_block(0, prev, curr) { in add_chain_block()
3407 if (size >= chain_block_size(curr)) in add_chain_block()
3410 init_chain_block(offset, curr, 0, size); in add_chain_block()
3462 int bucket, curr, size; in alloc_chain_hlocks() local
3481 curr = chain_block_buckets[bucket]; in alloc_chain_hlocks()
3484 if (curr >= 0) { in alloc_chain_hlocks()
3485 del_chain_block(bucket, req, chain_block_next(curr)); in alloc_chain_hlocks()
3486 return curr; in alloc_chain_hlocks()
3489 curr = chain_block_buckets[0]; in alloc_chain_hlocks()
3496 if (curr >= 0) { in alloc_chain_hlocks()
3497 size = chain_block_size(curr); in alloc_chain_hlocks()
3499 del_chain_block(0, size, chain_block_next(curr)); in alloc_chain_hlocks()
3501 add_chain_block(curr + req, size - req); in alloc_chain_hlocks()
3502 return curr; in alloc_chain_hlocks()
3511 curr = chain_block_buckets[bucket]; in alloc_chain_hlocks()
3512 if (curr < 0) in alloc_chain_hlocks()
3515 del_chain_block(bucket, size, chain_block_next(curr)); in alloc_chain_hlocks()
3516 add_chain_block(curr + req, size - req); in alloc_chain_hlocks()
3517 return curr; in alloc_chain_hlocks()
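
The add_chain_block()/alloc_chain_hlocks() entries implement a small bucketed free-list allocator over the static chain_hlocks[] array: one bucket per exact block size, bucket 0 for anything larger, and oversized blocks are split with the remainder freed back. A sketch of that strategy under simplifying assumptions; lockdep encodes the list links and sizes inside chain_hlocks[] itself, whereas this model uses heap nodes:

    #include <stdlib.h>

    #define NBUCKETS 16

    struct block { int offset, size; struct block *next; };
    static struct block *bucket[NBUCKETS];      /* bucket[0] = oversized */

    static void free_block(int offset, int size)
    {
        struct block *b = malloc(sizeof(*b));
        int i = (size < NBUCKETS) ? size : 0;

        b->offset = offset;
        b->size = size;
        b->next = bucket[i];
        bucket[i] = b;
    }

    static int alloc_block(int req)
    {
        /* 1) exact-size bucket: pop the head */
        if (req < NBUCKETS && bucket[req]) {
            struct block *b = bucket[req];
            int off = b->offset;

            bucket[req] = b->next;
            free(b);
            return off;
        }
        /*
         * 2) first fit in the oversized bucket, splitting the remainder
         *    back onto a free list (lockdep also falls back to larger
         *    exact-size buckets, elided here).
         */
        for (struct block **p = &bucket[0]; *p; p = &(*p)->next) {
            if ((*p)->size >= req) {
                struct block *b = *p;
                int off = b->offset;

                *p = b->next;
                if (b->size > req)
                    free_block(off + req, b->size - req);
                free(b);
                return off;
            }
        }
        return -1;  /* exhausted: real lockdep warns and disables itself */
    }
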
3539 static inline int get_first_held_lock(struct task_struct *curr, in get_first_held_lock() argument
3545 for (i = curr->lockdep_depth - 1; i >= 0; i--) { in get_first_held_lock()
3546 hlock_curr = curr->held_locks + i; in get_first_held_lock()
3570 print_chain_keys_held_locks(struct task_struct *curr, struct held_lock *hlock_next) in print_chain_keys_held_locks() argument
3574 int depth = curr->lockdep_depth; in print_chain_keys_held_locks()
3575 int i = get_first_held_lock(curr, hlock_next); in print_chain_keys_held_locks()
3580 hlock = curr->held_locks + i; in print_chain_keys_held_locks()
3606 static void print_collision(struct task_struct *curr, in print_collision() argument
3619 print_chain_keys_held_locks(curr, hlock_next); in print_collision()
3635 static int check_no_collision(struct task_struct *curr, in check_no_collision() argument
3642 i = get_first_held_lock(curr, hlock); in check_no_collision()
3644 if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1))) { in check_no_collision()
3645 print_collision(curr, hlock, chain); in check_no_collision()
3650 id = hlock_id(&curr->held_locks[i]); in check_no_collision()
3653 print_collision(curr, hlock, chain); in check_no_collision()
3695 static inline int add_chain_cache(struct task_struct *curr, in add_chain_cache() argument
3722 i = get_first_held_lock(curr, hlock); in add_chain_cache()
3723 chain->depth = curr->lockdep_depth + 1 - i; in add_chain_cache()
3726 BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks)); in add_chain_cache()
3741 int lock_id = hlock_id(curr->held_locks + i); in add_chain_cache()
3777 static inline int lookup_chain_cache_add(struct task_struct *curr, in lookup_chain_cache_add() argument
3786 if (!check_no_collision(curr, hlock, chain)) in lookup_chain_cache_add()
3816 if (!add_chain_cache(curr, hlock, chain_key)) in lookup_chain_cache_add()
3822 static int validate_chain(struct task_struct *curr, in validate_chain() argument
3837 lookup_chain_cache_add(curr, hlock, chain_key)) { in validate_chain()
3856 int ret = check_deadlock(curr, hlock); in validate_chain()
3869 if (!check_prevs_add(curr, hlock)) in validate_chain()
3883 static inline int validate_chain(struct task_struct *curr, in validate_chain() argument
3897 static void check_chain_key(struct task_struct *curr) in check_chain_key() argument
3904 for (i = 0; i < curr->lockdep_depth; i++) { in check_chain_key()
3905 hlock = curr->held_locks + i; in check_chain_key()
3913 curr->lockdep_depth, i, in check_chain_key()
3932 if (chain_key != curr->curr_chain_key) { in check_chain_key()
3939 curr->lockdep_depth, i, in check_chain_key()
3941 (unsigned long long)curr->curr_chain_key); in check_chain_key()
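
check_chain_key() recomputes the incremental chain hash from scratch over every held lock's class id and warns when it diverges from the cached curr->curr_chain_key, catching held-lock stack corruption. A simplified model; the mixer below is an FNV-style stand-in, not the kernel's __jhash_mix()-based iterate_chain_key():

    #include <stdint.h>
    #include <stdio.h>

    #define INITIAL_CHAIN_KEY ((uint64_t)-1)    /* lockdep's sentinel value */

    static uint64_t iterate_key(uint64_t key, uint32_t id)
    {
        return (key ^ id) * 0x100000001b3ULL;   /* illustration only */
    }

    static void model_check_chain_key(const uint32_t *ids, int depth,
                                      uint64_t curr_chain_key)
    {
        uint64_t key = INITIAL_CHAIN_KEY;

        for (int i = 0; i < depth; i++)
            key = iterate_key(key, ids[i]);

        if (key != curr_chain_key)
            fprintf(stderr, "chain key mismatch at depth %d\n", depth);
    }
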
3947 static int mark_lock(struct task_struct *curr, struct held_lock *this,
3968 print_usage_bug(struct task_struct *curr, struct held_lock *this, in print_usage_bug() argument
3984 curr->comm, task_pid_nr(curr), in print_usage_bug()
3986 lockdep_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, in print_usage_bug()
3988 lockdep_softirqs_enabled(curr)); in print_usage_bug()
3994 print_irqtrace_events(curr); in print_usage_bug()
3998 lockdep_print_held_locks(curr); in print_usage_bug()
4008 valid_state(struct task_struct *curr, struct held_lock *this, in valid_state() argument
4013 print_usage_bug(curr, this, bad_bit, new_bit); in valid_state()
4024 print_irq_inversion_bug(struct task_struct *curr, in print_irq_inversion_bug() argument
4042 curr->comm, task_pid_nr(curr)); in print_irq_inversion_bug()
4071 lockdep_print_held_locks(curr); in print_irq_inversion_bug()
4088 check_usage_forwards(struct task_struct *curr, struct held_lock *this, in check_usage_forwards() argument
4108 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_forwards()
4111 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_forwards()
4123 check_usage_backwards(struct task_struct *curr, struct held_lock *this, in check_usage_backwards() argument
4143 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_backwards()
4146 print_irq_inversion_bug(curr, &root, target_entry, in check_usage_backwards()
4153 void print_irqtrace_events(struct task_struct *curr) in print_irqtrace_events() argument
4155 const struct irqtrace_events *trace = &curr->irqtrace; in print_irqtrace_events()
4205 mark_lock_irq(struct task_struct *curr, struct held_lock *this, in mark_lock_irq() argument
4216 if (!valid_state(curr, this, new_bit, excl_bit)) in mark_lock_irq()
4222 if (!read && !valid_state(curr, this, new_bit, in mark_lock_irq()
4236 if (!check_usage_backwards(curr, this, excl_bit)) in mark_lock_irq()
4243 if (!check_usage_forwards(curr, this, excl_bit)) in mark_lock_irq()
4257 mark_held_locks(struct task_struct *curr, enum lock_usage_bit base_bit) in mark_held_locks() argument
4262 for (i = 0; i < curr->lockdep_depth; i++) { in mark_held_locks()
4264 hlock = curr->held_locks + i; in mark_held_locks()
4274 if (!mark_lock(curr, hlock, hlock_bit)) in mark_held_locks()
4286 struct task_struct *curr = current; in __trace_hardirqs_on_caller() local
4292 if (!mark_held_locks(curr, LOCK_ENABLED_HARDIRQ)) in __trace_hardirqs_on_caller()
4299 if (curr->softirqs_enabled) in __trace_hardirqs_on_caller()
4300 mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ); in __trace_hardirqs_on_caller()
4543 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) in mark_usage() argument
4555 if (!mark_lock(curr, hlock, in mark_usage()
4558 if (curr->softirq_context) in mark_usage()
4559 if (!mark_lock(curr, hlock, in mark_usage()
4564 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) in mark_usage()
4566 if (curr->softirq_context) in mark_usage()
4567 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) in mark_usage()
4579 if (!mark_lock(curr, hlock, in mark_usage()
4582 if (curr->softirqs_enabled) in mark_usage()
4583 if (!mark_lock(curr, hlock, in mark_usage()
4587 if (!mark_lock(curr, hlock, in mark_usage()
4590 if (curr->softirqs_enabled) in mark_usage()
4591 if (!mark_lock(curr, hlock, in mark_usage()
4599 if (!mark_lock(curr, hlock, LOCK_USED)) in mark_usage()
4611 static int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
4614 unsigned int depth = curr->lockdep_depth; in separate_irq_context()
4622 prev_hlock = curr->held_locks + depth-1; in separate_irq_context()
4637 static int mark_lock(struct task_struct *curr, struct held_lock *this, in mark_lock() argument
4678 ret = mark_lock_irq(curr, this, new_bit); in mark_lock()
4692 print_irqtrace_events(curr); in mark_lock()
4699 static inline short task_wait_context(struct task_struct *curr) in task_wait_context() argument
4709 if (curr->hardirq_threaded || curr->irq_config) in task_wait_context()
4713 } else if (curr->softirq_context) { in task_wait_context()
4724 print_lock_invalid_wait_context(struct task_struct *curr, in print_lock_invalid_wait_context() argument
4740 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_invalid_wait_context()
4745 curr_inner = task_wait_context(curr); in print_lock_invalid_wait_context()
4748 lockdep_print_held_locks(curr); in print_lock_invalid_wait_context()
4771 static int check_wait_context(struct task_struct *curr, struct held_lock *next) in check_wait_context() argument
4787 for (depth = curr->lockdep_depth - 1; depth >= 0; depth--) { in check_wait_context()
4788 struct held_lock *prev = curr->held_locks + depth; in check_wait_context()
4794 curr_inner = task_wait_context(curr); in check_wait_context()
4796 for (; depth < curr->lockdep_depth; depth++) { in check_wait_context()
4797 struct held_lock *prev = curr->held_locks + depth; in check_wait_context()
4821 return print_lock_invalid_wait_context(curr, next); in check_wait_context()
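
check_wait_context() enforces that a lock is never "sleepier" than the context it is acquired in: roughly, inside a held raw spinlock only raw spinlocks may be taken, inside a spinlock only spinlock-or-stricter, and so on. A minimal model of the inner comparison, with the wait types ordered as in the kernel's enum lockdep_wait_type; IRQ contexts and the outer/override types are left out of this sketch:

    enum wait_type { WAIT_INV, WAIT_FREE, WAIT_SPIN, WAIT_CONFIG, WAIT_SLEEP };

    /* 0 = valid nesting, -1 = invalid wait context */
    static int model_check_wait_context(const enum wait_type *held, int depth,
                                        enum wait_type next)
    {
        enum wait_type curr_inner = WAIT_SLEEP;     /* plain task context */

        /* narrow to the strictest inner wait type among held locks */
        for (int i = 0; i < depth; i++)
            if (held[i] != WAIT_INV && held[i] < curr_inner)
                curr_inner = held[i];

        return next <= curr_inner ? 0 : -1;
    }

For example, with a WAIT_SPIN lock held, curr_inner narrows to WAIT_SPIN and a subsequent WAIT_SLEEP lock (a mutex, say) is rejected.
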
4829 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check) in mark_usage() argument
4839 static inline int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
4845 static inline int check_wait_context(struct task_struct *curr, in check_wait_context() argument
4949 print_lock_nested_lock_not_held(struct task_struct *curr, in print_lock_nested_lock_not_held() argument
4963 pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_nested_lock_not_held()
4973 lockdep_print_held_locks(curr); in print_lock_nested_lock_not_held()
4994 struct task_struct *curr = current; in __lock_acquire() local
5034 depth = curr->lockdep_depth; in __lock_acquire()
5045 hlock = curr->held_locks + depth - 1; in __lock_acquire()
5063 hlock = curr->held_locks + depth; in __lock_acquire()
5074 hlock->irq_context = task_irq_context(curr); in __lock_acquire()
5087 if (check_wait_context(curr, hlock)) in __lock_acquire()
5091 if (!mark_usage(curr, hlock, check)) in __lock_acquire()
5110 chain_key = curr->curr_chain_key; in __lock_acquire()
5121 if (separate_irq_context(curr, hlock)) { in __lock_acquire()
5128 print_lock_nested_lock_not_held(curr, hlock); in __lock_acquire()
5137 if (!validate_chain(curr, hlock, chain_head, chain_key)) in __lock_acquire()
5144 curr->curr_chain_key = chain_key; in __lock_acquire()
5145 curr->lockdep_depth++; in __lock_acquire()
5146 check_chain_key(curr); in __lock_acquire()
5151 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { in __lock_acquire()
5155 curr->lockdep_depth, MAX_LOCK_DEPTH); in __lock_acquire()
5164 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) in __lock_acquire()
5165 max_lockdep_depth = curr->lockdep_depth; in __lock_acquire()
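
The __lock_acquire() entries show the fast-path bookkeeping: the new held_lock is filled in at curr->held_locks[depth], the running chain key is extended, the chain is validated, and only then are curr->curr_chain_key and curr->lockdep_depth committed, with a hard cap at MAX_LOCK_DEPTH. A stripped-down model of that push sequence; validate_chain() is stubbed out and the mixer mirrors the stand-in used in the check_chain_key() sketch above:

    #include <stdint.h>
    #include <stdbool.h>

    #define MAX_LOCK_DEPTH 48   /* lockdep's per-task held-lock cap */

    struct held_model { uint32_t class_id; uint64_t prev_chain_key; };

    struct task_model {
        int depth;
        uint64_t chain_key;
        struct held_model held_locks[MAX_LOCK_DEPTH];
    };

    static uint64_t mix_key(uint64_t key, uint32_t id)
    {
        return (key ^ id) * 0x100000001b3ULL;       /* illustration only */
    }

    static bool validate_chain_stub(void) { return true; }  /* real work elided */

    static bool model_lock_acquire(struct task_model *t, uint32_t class_id)
    {
        if (t->depth >= MAX_LOCK_DEPTH)
            return false;               /* "BUG: MAX_LOCK_DEPTH too low!" */

        struct held_model *h = &t->held_locks[t->depth];
        h->class_id = class_id;
        h->prev_chain_key = t->chain_key;   /* saved for rewind on release */

        uint64_t key = mix_key(t->chain_key, class_id);
        if (!validate_chain_stub())
            return false;

        t->chain_key = key;                 /* commit only after validation */
        t->depth++;
        return true;
    }
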
5170 static void print_unlock_imbalance_bug(struct task_struct *curr, in print_unlock_imbalance_bug() argument
5185 curr->comm, task_pid_nr(curr)); in print_unlock_imbalance_bug()
5191 lockdep_print_held_locks(curr); in print_unlock_imbalance_bug()
5234 static struct held_lock *find_held_lock(struct task_struct *curr, in find_held_lock() argument
5242 hlock = curr->held_locks + i; in find_held_lock()
5269 static int reacquire_held_locks(struct task_struct *curr, unsigned int depth, in reacquire_held_locks() argument
5278 for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) { in reacquire_held_locks()
5306 struct task_struct *curr = current; in __lock_set_class() local
5315 depth = curr->lockdep_depth; in __lock_set_class()
5323 hlock = find_held_lock(curr, lock, depth, &i); in __lock_set_class()
5325 print_unlock_imbalance_bug(curr, lock, ip); in __lock_set_class()
5336 curr->lockdep_depth = i; in __lock_set_class()
5337 curr->curr_chain_key = hlock->prev_chain_key; in __lock_set_class()
5339 if (reacquire_held_locks(curr, depth, i, &merged)) in __lock_set_class()
5346 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged)) in __lock_set_class()
5353 struct task_struct *curr = current; in __lock_downgrade() local
5361 depth = curr->lockdep_depth; in __lock_downgrade()
5369 hlock = find_held_lock(curr, lock, depth, &i); in __lock_downgrade()
5371 print_unlock_imbalance_bug(curr, lock, ip); in __lock_downgrade()
5375 curr->lockdep_depth = i; in __lock_downgrade()
5376 curr->curr_chain_key = hlock->prev_chain_key; in __lock_downgrade()
5382 if (reacquire_held_locks(curr, depth, i, &merged)) in __lock_downgrade()
5393 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) in __lock_downgrade()
5407 struct task_struct *curr = current; in __lock_release() local
5415 depth = curr->lockdep_depth; in __lock_release()
5421 print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
5429 hlock = find_held_lock(curr, lock, depth, &i); in __lock_release()
5431 print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
5458 curr->lockdep_depth = i; in __lock_release()
5459 curr->curr_chain_key = hlock->prev_chain_key; in __lock_release()
5468 if (reacquire_held_locks(curr, depth, i + 1, &merged)) in __lock_release()
5476 DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - merged); in __lock_release()
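
__lock_release(), like __lock_set_class() and __lock_downgrade() above it, follows a rewind-and-replay pattern: find the lock anywhere in the held stack (find_held_lock()), rewind curr->lockdep_depth and curr->curr_chain_key to the saved prev_chain_key, then replay the locks stacked above it (reacquire_held_locks()). A sketch reusing struct task_model and model_lock_acquire() from the block above; entry merging and pin counts are omitted:

    static bool model_lock_release(struct task_model *t, uint32_t class_id)
    {
        int i;

        /* the released lock need not be the most recently taken one */
        for (i = t->depth - 1; i >= 0; i--)
            if (t->held_locks[i].class_id == class_id)
                break;
        if (i < 0)
            return false;                       /* unlock imbalance */

        struct held_model tail[MAX_LOCK_DEPTH];
        int ntail = t->depth - (i + 1);
        for (int j = 0; j < ntail; j++)
            tail[j] = t->held_locks[i + 1 + j];

        t->depth = i;                           /* rewind ... */
        t->chain_key = t->held_locks[i].prev_chain_key;

        for (int j = 0; j < ntail; j++)         /* ... and replay the rest */
            if (!model_lock_acquire(t, tail[j].class_id))
                return false;

        return true;
    }
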
5489 struct task_struct *curr = current; in __lock_is_held() local
5492 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_is_held()
5493 struct held_lock *hlock = curr->held_locks + i; in __lock_is_held()
5509 struct task_struct *curr = current; in __lock_pin_lock() local
5515 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_pin_lock()
5516 struct held_lock *hlock = curr->held_locks + i; in __lock_pin_lock()
5536 struct task_struct *curr = current; in __lock_repin_lock() local
5542 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_repin_lock()
5543 struct held_lock *hlock = curr->held_locks + i; in __lock_repin_lock()
5556 struct task_struct *curr = current; in __lock_unpin_lock() local
5562 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_unpin_lock()
5563 struct held_lock *hlock = curr->held_locks + i; in __lock_unpin_lock()
5889 static void print_lock_contention_bug(struct task_struct *curr, in print_lock_contention_bug() argument
5904 curr->comm, task_pid_nr(curr)); in print_lock_contention_bug()
5910 lockdep_print_held_locks(curr); in print_lock_contention_bug()
5919 struct task_struct *curr = current; in __lock_contended() local
5925 depth = curr->lockdep_depth; in __lock_contended()
5933 hlock = find_held_lock(curr, lock, depth, &i); in __lock_contended()
5935 print_lock_contention_bug(curr, lock, ip); in __lock_contended()
5960 struct task_struct *curr = current; in __lock_acquired() local
5967 depth = curr->lockdep_depth; in __lock_acquired()
5975 hlock = find_held_lock(curr, lock, depth, &i); in __lock_acquired()
5977 print_lock_contention_bug(curr, lock, _RET_IP_); in __lock_acquired()
6519 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, in print_freed_lock_bug() argument
6533 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); in print_freed_lock_bug()
6535 lockdep_print_held_locks(curr); in print_freed_lock_bug()
6555 struct task_struct *curr = current; in debug_check_no_locks_freed() local
6564 for (i = 0; i < curr->lockdep_depth; i++) { in debug_check_no_locks_freed()
6565 hlock = curr->held_locks + i; in debug_check_no_locks_freed()
6571 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); in debug_check_no_locks_freed()
6646 struct task_struct *curr = current; in lockdep_sys_exit() local
6648 if (unlikely(curr->lockdep_depth)) { in lockdep_sys_exit()
6657 curr->comm, curr->pid); in lockdep_sys_exit()
6658 lockdep_print_held_locks(curr); in lockdep_sys_exit()
6670 struct task_struct *curr = current; in lockdep_rcu_suspicious() local
6710 lockdep_print_held_locks(curr); in lockdep_rcu_suspicious()