Lines matching refs: krcp

2871 	struct kfree_rcu_cpu *krcp;  member
2944 struct kfree_rcu_cpu *krcp; in krc_this_cpu_lock() local
2947 krcp = this_cpu_ptr(&krc); in krc_this_cpu_lock()
2948 raw_spin_lock(&krcp->lock); in krc_this_cpu_lock()
2950 return krcp; in krc_this_cpu_lock()
2954 krc_this_cpu_unlock(struct kfree_rcu_cpu *krcp, unsigned long flags) in krc_this_cpu_unlock() argument
2956 raw_spin_unlock_irqrestore(&krcp->lock, flags); in krc_this_cpu_unlock()
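
The pair above is the standard entry/exit for touching this CPU's kfree_rcu_cpu: krc_this_cpu_lock() disables interrupts, resolves the CPU-local structure with this_cpu_ptr(), and takes its raw spinlock, while krc_this_cpu_unlock() drops the lock and restores the saved flags. Below is a minimal user-space sketch of the same acquire/release discipline; the demo_* names are made up for illustration and a plain pthread mutex stands in for the per-CPU raw spinlock.

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in for struct kfree_rcu_cpu: one lock per "CPU". */
    struct demo_krc {
            pthread_mutex_t lock;
            int nr_objs;
    };

    static struct demo_krc krc_demo = { PTHREAD_MUTEX_INITIALIZER, 0 };

    /* Like krc_this_cpu_lock(): return the CPU-local structure, locked. */
    static struct demo_krc *demo_krc_lock(void)
    {
            struct demo_krc *krcp = &krc_demo;  /* this_cpu_ptr(&krc) analogue */

            pthread_mutex_lock(&krcp->lock);    /* raw_spin_lock(&krcp->lock) */
            return krcp;
    }

    /* Like krc_this_cpu_unlock(): drop the lock taken above. */
    static void demo_krc_unlock(struct demo_krc *krcp)
    {
            pthread_mutex_unlock(&krcp->lock);
    }

    int main(void)
    {
            struct demo_krc *krcp = demo_krc_lock();

            krcp->nr_objs++;                    /* work done under the lock */
            demo_krc_unlock(krcp);
            printf("nr_objs = %d\n", krcp->nr_objs);
            return 0;
    }
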
2960 get_cached_bnode(struct kfree_rcu_cpu *krcp) in get_cached_bnode() argument
2962 if (!krcp->nr_bkv_objs) in get_cached_bnode()
2965 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs - 1); in get_cached_bnode()
2967 llist_del_first(&krcp->bkvcache); in get_cached_bnode()
2971 put_cached_bnode(struct kfree_rcu_cpu *krcp, in put_cached_bnode() argument
2975 if (krcp->nr_bkv_objs >= rcu_min_cached_objs) in put_cached_bnode()
2978 llist_add((struct llist_node *) bnode, &krcp->bkvcache); in put_cached_bnode()
2979 WRITE_ONCE(krcp->nr_bkv_objs, krcp->nr_bkv_objs + 1); in put_cached_bnode()
2984 drain_page_cache(struct kfree_rcu_cpu *krcp) in drain_page_cache() argument
2993 raw_spin_lock_irqsave(&krcp->lock, flags); in drain_page_cache()
2994 page_list = llist_del_all(&krcp->bkvcache); in drain_page_cache()
2995 WRITE_ONCE(krcp->nr_bkv_objs, 0); in drain_page_cache()
2996 raw_spin_unlock_irqrestore(&krcp->lock, flags); in drain_page_cache()
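
get_cached_bnode(), put_cached_bnode() and drain_page_cache() manage the small per-CPU cache of spare pages (krcp->bkvcache): a put is refused once nr_bkv_objs has reached rcu_min_cached_objs, a get pops one entry and decrements the counter, and the drain detaches the whole list under the lock, zeroes the counter and frees the pages afterwards. The single-threaded sketch below models that bounded free-list; DEMO_MIN_CACHED_OBJS and the demo_* helpers are illustrative stand-ins, and the locking is omitted.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define DEMO_MIN_CACHED_OBJS 5          /* stands in for rcu_min_cached_objs */

    struct demo_bnode { struct demo_bnode *next; };

    struct demo_cache {
            struct demo_bnode *head;        /* krcp->bkvcache analogue */
            int nr_objs;                    /* krcp->nr_bkv_objs analogue */
    };

    /* Like put_cached_bnode(): refuse the page once the cache is full. */
    static bool demo_put(struct demo_cache *c, struct demo_bnode *b)
    {
            if (c->nr_objs >= DEMO_MIN_CACHED_OBJS)
                    return false;
            b->next = c->head;
            c->head = b;
            c->nr_objs++;
            return true;
    }

    /* Like get_cached_bnode(): pop one cached page, or NULL when empty. */
    static struct demo_bnode *demo_get(struct demo_cache *c)
    {
            struct demo_bnode *b = c->head;

            if (!b)
                    return NULL;
            c->head = b->next;
            c->nr_objs--;
            return b;
    }

    /* Like drain_page_cache(): detach the whole list, reset the count, free it. */
    static int demo_drain(struct demo_cache *c)
    {
            struct demo_bnode *b = c->head;
            int freed = 0;

            c->head = NULL;
            c->nr_objs = 0;
            while (b) {
                    struct demo_bnode *next = b->next;

                    free(b);
                    b = next;
                    freed++;
            }
            return freed;
    }

    int main(void)
    {
            struct demo_cache c = { NULL, 0 };

            for (int i = 0; i < 8; i++) {
                    struct demo_bnode *b = malloc(sizeof(*b));

                    if (b && !demo_put(&c, b))
                            free(b);        /* cache already full: give it back */
            }
            free(demo_get(&c));             /* a consumer takes one cached page */
            printf("drained %d cached pages\n", demo_drain(&c));
            return 0;
    }

Capping the cache keeps a few pages ready for the fast path without letting every CPU hoard memory, which is presumably also why the shrinker path drains it (see kfree_rcu_shrink_scan() further down).
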
3007 kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp, in kvfree_rcu_bulk() argument
3033 raw_spin_lock_irqsave(&krcp->lock, flags); in kvfree_rcu_bulk()
3034 if (put_cached_bnode(krcp, bnode)) in kvfree_rcu_bulk()
3036 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kvfree_rcu_bulk()
3076 struct kfree_rcu_cpu *krcp; in kfree_rcu_work() local
3083 krcp = krwp->krcp; in kfree_rcu_work()
3085 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_work()
3094 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_work()
3100 kvfree_rcu_bulk(krcp, bnode, i); in kfree_rcu_work()
3115 need_offload_krc(struct kfree_rcu_cpu *krcp) in need_offload_krc() argument
3120 if (!list_empty(&krcp->bulk_head[i])) in need_offload_krc()
3123 return !!READ_ONCE(krcp->head); in need_offload_krc()
3138 static int krc_count(struct kfree_rcu_cpu *krcp) in krc_count() argument
3140 int sum = atomic_read(&krcp->head_count); in krc_count()
3144 sum += atomic_read(&krcp->bulk_count[i]); in krc_count()
3150 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp) in schedule_delayed_monitor_work() argument
3154 delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES; in schedule_delayed_monitor_work()
3155 if (delayed_work_pending(&krcp->monitor_work)) { in schedule_delayed_monitor_work()
3156 delay_left = krcp->monitor_work.timer.expires - jiffies; in schedule_delayed_monitor_work()
3158 mod_delayed_work(system_wq, &krcp->monitor_work, delay); in schedule_delayed_monitor_work()
3161 queue_delayed_work(system_wq, &krcp->monitor_work, delay); in schedule_delayed_monitor_work()
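
schedule_delayed_monitor_work() chooses the delay for the monitor: a single jiffy when the CPU already holds KVFREE_BULK_MAX_ENTR or more queued objects, KFREE_DRAIN_JIFFIES otherwise, and if monitor work is already pending the timer is only ever moved earlier (via mod_delayed_work()), never pushed back. The sketch below models just that decision with plain integers in place of jiffies and workqueue calls; all demo_* names and constants are illustrative.

    #include <stdio.h>

    #define DEMO_BULK_MAX   256     /* stands in for KVFREE_BULK_MAX_ENTR */
    #define DEMO_DRAIN_TICK 50      /* stands in for KFREE_DRAIN_JIFFIES */

    /*
     * Return the delay the monitor should run at, given how many objects are
     * queued and how long a pending run still has to wait (-1 if none is
     * pending).  Mirrors the "only move the timer earlier" rule of
     * schedule_delayed_monitor_work().
     */
    static long demo_monitor_delay(int queued, long pending_left)
    {
            long delay = queued >= DEMO_BULK_MAX ? 1 : DEMO_DRAIN_TICK;

            if (pending_left >= 0) {        /* delayed work already pending */
                    if (delay < pending_left)
                            return delay;   /* mod_delayed_work(): run sooner */
                    return pending_left;    /* keep the earlier expiry */
            }
            return delay;                   /* queue_delayed_work() */
    }

    int main(void)
    {
            printf("%ld\n", demo_monitor_delay(10, -1));   /* 50: normal drain */
            printf("%ld\n", demo_monitor_delay(300, -1));  /* 1: backlog, hurry */
            printf("%ld\n", demo_monitor_delay(300, 40));  /* 1: pull pending in */
            printf("%ld\n", demo_monitor_delay(10, 5));    /* 5: never push out */
            return 0;
    }
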
3165 kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp) in kvfree_rcu_drain_ready() argument
3173 raw_spin_lock_irqsave(&krcp->lock, flags); in kvfree_rcu_drain_ready()
3177 list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) { in kvfree_rcu_drain_ready()
3181 atomic_sub(bnode->nr_records, &krcp->bulk_count[i]); in kvfree_rcu_drain_ready()
3186 if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) { in kvfree_rcu_drain_ready()
3187 head_ready = krcp->head; in kvfree_rcu_drain_ready()
3188 atomic_set(&krcp->head_count, 0); in kvfree_rcu_drain_ready()
3189 WRITE_ONCE(krcp->head, NULL); in kvfree_rcu_drain_ready()
3191 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kvfree_rcu_drain_ready()
3195 kvfree_rcu_bulk(krcp, bnode, i); in kvfree_rcu_drain_ready()
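
kvfree_rcu_drain_ready() frees early only what is already safe: each queued batch carries a grace-period cookie taken with get_state_synchronize_rcu() when it was filled, and poll_state_synchronize_rcu() later reports whether a full grace period has elapsed since that snapshot, so ready blocks can be freed immediately while the rest stay queued. The toy model below shows that snapshot/poll pairing with a bare counter standing in for the RCU grace-period sequence; it ignores wrap-around and the real cookie encoding.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy grace-period counter standing in for the RCU grace-period sequence. */
    static unsigned long demo_gp_seq;

    /* get_state_synchronize_rcu() analogue: snapshot the current sequence. */
    static unsigned long demo_get_state(void)
    {
            return demo_gp_seq;
    }

    /* A grace period elapsing is modelled as bumping the sequence. */
    static void demo_complete_grace_period(void)
    {
            demo_gp_seq++;
    }

    /* poll_state_synchronize_rcu() analogue: has a GP passed since snap? */
    static bool demo_poll_state(unsigned long snap)
    {
            return demo_gp_seq != snap;
    }

    int main(void)
    {
            unsigned long snap = demo_get_state(); /* taken when objects queue */

            printf("ready before GP? %d\n", demo_poll_state(snap)); /* 0: wait */
            demo_complete_grace_period();
            printf("ready after GP?  %d\n", demo_poll_state(snap)); /* 1: free */
            return 0;
    }
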
3207 struct kfree_rcu_cpu *krcp = container_of(work, in kfree_rcu_monitor() local
3213 kvfree_rcu_drain_ready(krcp); in kfree_rcu_monitor()
3215 raw_spin_lock_irqsave(&krcp->lock, flags); in kfree_rcu_monitor()
3219 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]); in kfree_rcu_monitor()
3228 if (need_offload_krc(krcp)) { in kfree_rcu_monitor()
3233 atomic_set(&krcp->bulk_count[j], 0); in kfree_rcu_monitor()
3234 list_replace_init(&krcp->bulk_head[j], in kfree_rcu_monitor()
3242 krwp->head_free = krcp->head; in kfree_rcu_monitor()
3244 atomic_set(&krcp->head_count, 0); in kfree_rcu_monitor()
3245 WRITE_ONCE(krcp->head, NULL); in kfree_rcu_monitor()
3257 raw_spin_unlock_irqrestore(&krcp->lock, flags); in kfree_rcu_monitor()
3264 if (need_offload_krc(krcp)) in kfree_rcu_monitor()
3265 schedule_delayed_monitor_work(krcp); in kfree_rcu_monitor()
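
kfree_rcu_monitor() first drains whatever kvfree_rcu_drain_ready() found to be past its grace period, then tries to hand the remaining per-CPU lists to one of the KFREE_N_BATCHES kfree_rcu_cpu_work channels: a channel is reused only once its own free lists are empty, the lists are detached with list_replace_init() and the counters reset, and an rcu_work is queued so the actual freeing happens after the next grace period; anything that could not be offloaded keeps the monitor scheduled. Below is a heavily stripped-down model of that channel selection, with two channels and an integer count in place of the lists; all names are illustrative.

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_N_BATCHES 2        /* stands in for KFREE_N_BATCHES */

    struct demo_channel {
            int pending;            /* objects queued on this channel, 0 == free */
    };

    struct demo_krc {
            int queued;             /* objects waiting on the per-CPU lists */
            struct demo_channel ch[DEMO_N_BATCHES];
    };

    /* Queue the channel's batch for freeing after a grace period (stub). */
    static void demo_queue_rcu_work(struct demo_channel *c) { (void)c; }

    /* Core of kfree_rcu_monitor(): offload to the first free channel. */
    static bool demo_monitor(struct demo_krc *krcp)
    {
            for (int i = 0; i < DEMO_N_BATCHES; i++) {
                    struct demo_channel *c = &krcp->ch[i];

                    if (!krcp->queued)
                            break;
                    if (c->pending)
                            continue;       /* channel busy with an older batch */

                    c->pending = krcp->queued; /* list_replace_init() analogue */
                    krcp->queued = 0;
                    demo_queue_rcu_work(c);    /* free after the next GP */
            }
            return krcp->queued != 0;          /* true: reschedule the monitor */
    }

    int main(void)
    {
            struct demo_krc krc = { .queued = 100 };

            printf("reschedule? %d\n", demo_monitor(&krc)); /* 0: offloaded */
            krc.queued = 50;
            krc.ch[0].pending = krc.ch[1].pending = 1;      /* channels busy */
            printf("reschedule? %d\n", demo_monitor(&krc)); /* 1: keep going */
            return 0;
    }
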
3271 struct kfree_rcu_cpu *krcp = in schedule_page_work_fn() local
3274 queue_delayed_work(system_highpri_wq, &krcp->page_cache_work, 0); in schedule_page_work_fn()
3281 struct kfree_rcu_cpu *krcp = in fill_page_cache_func() local
3289 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? in fill_page_cache_func()
3292 for (i = READ_ONCE(krcp->nr_bkv_objs); i < nr_pages; i++) { in fill_page_cache_func()
3299 raw_spin_lock_irqsave(&krcp->lock, flags); in fill_page_cache_func()
3300 pushed = put_cached_bnode(krcp, bnode); in fill_page_cache_func()
3301 raw_spin_unlock_irqrestore(&krcp->lock, flags); in fill_page_cache_func()
3309 atomic_set(&krcp->work_in_progress, 0); in fill_page_cache_func()
3310 atomic_set(&krcp->backoff_page_cache_fill, 0); in fill_page_cache_func()
3314 run_page_cache_worker(struct kfree_rcu_cpu *krcp) in run_page_cache_worker() argument
3321 !atomic_xchg(&krcp->work_in_progress, 1)) { in run_page_cache_worker()
3322 if (atomic_read(&krcp->backoff_page_cache_fill)) { in run_page_cache_worker()
3324 &krcp->page_cache_work, in run_page_cache_worker()
3327 hrtimer_init(&krcp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in run_page_cache_worker()
3328 krcp->hrtimer.function = schedule_page_work_fn; in run_page_cache_worker()
3329 hrtimer_start(&krcp->hrtimer, 0, HRTIMER_MODE_REL); in run_page_cache_worker()
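
run_page_cache_worker() uses atomic_xchg(&krcp->work_in_progress, 1) so that at most one refill is in flight per CPU; when the shrinker has set backoff_page_cache_fill the refill is queued as delayed work and targets a single page, otherwise an hrtimer kicks fill_page_cache_func(), which allocates pages into the cache up to rcu_min_cached_objs and finally clears both flags. The minimal model below shows that one-shot guard and refill, using C11 atomics in place of the kernel atomics; the demo_* names and the constant are illustrative.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_MIN_CACHED_OBJS 5

    static atomic_int work_in_progress;        /* krcp->work_in_progress */
    static atomic_int backoff_page_cache_fill; /* krcp->backoff_page_cache_fill */
    static int nr_cached;                      /* krcp->nr_bkv_objs */

    /* Like fill_page_cache_func(): refill the cache, then clear the flags. */
    static void demo_fill_page_cache(void)
    {
            int nr_pages = atomic_load(&backoff_page_cache_fill) ?
                           1 : DEMO_MIN_CACHED_OBJS;

            while (nr_cached < nr_pages)
                    nr_cached++;               /* stands in for a page allocation */

            atomic_store(&work_in_progress, 0);
            atomic_store(&backoff_page_cache_fill, 0);
    }

    /* Like run_page_cache_worker(): start a refill only if none is running. */
    static bool demo_run_page_cache_worker(void)
    {
            if (atomic_exchange(&work_in_progress, 1))
                    return false;              /* a refill is already in flight */
            demo_fill_page_cache();            /* the kernel defers this to a work */
            return true;
    }

    int main(void)
    {
            demo_run_page_cache_worker();
            printf("cached pages: %d\n", nr_cached);

            atomic_store(&backoff_page_cache_fill, 1);
            nr_cached = 0;
            demo_run_page_cache_worker();
            printf("cached pages under backoff: %d\n", nr_cached);
            return 0;
    }
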
3341 add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp, in add_ptr_to_bulk_krc_lock() argument
3347 *krcp = krc_this_cpu_lock(flags); in add_ptr_to_bulk_krc_lock()
3348 if (unlikely(!(*krcp)->initialized)) in add_ptr_to_bulk_krc_lock()
3352 bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx], in add_ptr_to_bulk_krc_lock()
3357 bnode = get_cached_bnode(*krcp); in add_ptr_to_bulk_krc_lock()
3359 krc_this_cpu_unlock(*krcp, *flags); in add_ptr_to_bulk_krc_lock()
3374 raw_spin_lock_irqsave(&(*krcp)->lock, *flags); in add_ptr_to_bulk_krc_lock()
3382 list_add(&bnode->list, &(*krcp)->bulk_head[idx]); in add_ptr_to_bulk_krc_lock()
3388 atomic_inc(&(*krcp)->bulk_count[idx]); in add_ptr_to_bulk_krc_lock()
3408 struct kfree_rcu_cpu *krcp; in kvfree_call_rcu() local
3432 success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); in kvfree_call_rcu()
3434 run_page_cache_worker(krcp); in kvfree_call_rcu()
3441 head->next = krcp->head; in kvfree_call_rcu()
3442 WRITE_ONCE(krcp->head, head); in kvfree_call_rcu()
3443 atomic_inc(&krcp->head_count); in kvfree_call_rcu()
3446 krcp->head_gp_snap = get_state_synchronize_rcu(); in kvfree_call_rcu()
3460 schedule_delayed_monitor_work(krcp); in kvfree_call_rcu()
3463 krc_this_cpu_unlock(krcp, flags); in kvfree_call_rcu()
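
kvfree_call_rcu() first tries to record the pointer in a bulk block via add_ptr_to_bulk_krc_lock(); only if no block can be obtained, and the object actually embeds an rcu_head (the headless single-argument variant instead falls back to synchronize_rcu() plus kvfree() once the lock is dropped), is it chained onto krcp->head, head_count bumped and a fresh grace-period snapshot stored in head_gp_snap, after which the monitor work is (re)scheduled. The sketch below compresses that decision flow; every demo_* helper is a stub standing in for the real bulk path, RCU and workqueue calls.

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_rcu_head { struct demo_rcu_head *next; };

    struct demo_krc {
            struct demo_rcu_head *head;     /* fallback list, as krcp->head */
            int head_count;
            unsigned long head_gp_snap;
            int bulk_count;
    };

    /* Stubs for what the real code gets from the bulk path and from RCU. */
    static bool demo_add_ptr_to_bulk(struct demo_krc *krcp, void *ptr)
    {
            (void)ptr;
            if (krcp->bulk_count >= 4)      /* pretend the block is full */
                    return false;
            krcp->bulk_count++;
            return true;
    }
    static unsigned long demo_get_state(void) { return 42; }
    static void demo_schedule_monitor(struct demo_krc *krcp) { (void)krcp; }

    /* Sketch of the queueing decision in kvfree_call_rcu(). */
    static void demo_kvfree_call_rcu(struct demo_krc *krcp,
                                     struct demo_rcu_head *head, void *ptr)
    {
            if (!demo_add_ptr_to_bulk(krcp, ptr)) {
                    /* Bulk path failed: chain the object via its rcu_head. */
                    head->next = krcp->head;
                    krcp->head = head;
                    krcp->head_count++;
                    /* Remember when this batch was queued vs. grace periods. */
                    krcp->head_gp_snap = demo_get_state();
            }
            demo_schedule_monitor(krcp);
    }

    int main(void)
    {
            struct demo_krc krc = { 0 };
            struct demo_rcu_head objs[6];

            for (int i = 0; i < 6; i++)
                    demo_kvfree_call_rcu(&krc, &objs[i], &objs[i]);
            printf("bulk=%d, fallback=%d\n", krc.bulk_count, krc.head_count);
            return 0;
    }
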
3486 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_count() local
3488 count += krc_count(krcp); in kfree_rcu_shrink_count()
3489 count += READ_ONCE(krcp->nr_bkv_objs); in kfree_rcu_shrink_count()
3490 atomic_set(&krcp->backoff_page_cache_fill, 1); in kfree_rcu_shrink_count()
3503 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_shrink_scan() local
3505 count = krc_count(krcp); in kfree_rcu_shrink_scan()
3506 count += drain_page_cache(krcp); in kfree_rcu_shrink_scan()
3507 kfree_rcu_monitor(&krcp->monitor_work.work); in kfree_rcu_shrink_scan()
3531 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_scheduler_running() local
3533 if (need_offload_krc(krcp)) in kfree_rcu_scheduler_running()
3534 schedule_delayed_monitor_work(krcp); in kfree_rcu_scheduler_running()
5025 struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu); in kfree_rcu_batch_init() local
5028 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work); in kfree_rcu_batch_init()
5029 krcp->krw_arr[i].krcp = krcp; in kfree_rcu_batch_init()
5032 INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]); in kfree_rcu_batch_init()
5036 INIT_LIST_HEAD(&krcp->bulk_head[i]); in kfree_rcu_batch_init()
5038 INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor); in kfree_rcu_batch_init()
5039 INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func); in kfree_rcu_batch_init()
5040 krcp->initialized = true; in kfree_rcu_batch_init()