Lines Matching refs:ent (each hit references ent, a struct mlx5_cache_ent *, in the mlx5_ib mkey cache code)

112 	struct mlx5_ib_dev *dev = async_create->ent->dev;  in mlx5_ib_create_mkey_cb()
125 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
147 static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings, in push_mkey_locked() argument
150 XA_STATE(xas, &ent->mkeys, 0); in push_mkey_locked()
154 (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) in push_mkey_locked()
163 xas_set(&xas, ent->reserved); in push_mkey_locked()
166 if (to_store && ent->stored == ent->reserved) in push_mkey_locked()
171 ent->reserved++; in push_mkey_locked()
173 if (ent->stored != ent->reserved) in push_mkey_locked()
174 __xa_store(&ent->mkeys, in push_mkey_locked()
175 ent->stored, in push_mkey_locked()
178 ent->stored++; in push_mkey_locked()
179 queue_adjust_cache_locked(ent); in push_mkey_locked()
180 WRITE_ONCE(ent->dev->cache.last_add, in push_mkey_locked()
185 xa_unlock_irq(&ent->mkeys); in push_mkey_locked()
193 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
195 xa_lock_irq(&ent->mkeys); in push_mkey_locked()
203 static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings, in push_mkey() argument
208 xa_lock_irq(&ent->mkeys); in push_mkey()
209 ret = push_mkey_locked(ent, limit_pendings, to_store); in push_mkey()
210 xa_unlock_irq(&ent->mkeys); in push_mkey()
214 static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent) in undo_push_reserve_mkey() argument
218 ent->reserved--; in undo_push_reserve_mkey()
219 old = __xa_erase(&ent->mkeys, ent->reserved); in undo_push_reserve_mkey()
223 static void push_to_reserved(struct mlx5_cache_ent *ent, u32 mkey) in push_to_reserved() argument
227 old = __xa_store(&ent->mkeys, ent->stored, xa_mk_value(mkey), 0); in push_to_reserved()
229 ent->stored++; in push_to_reserved()
232 static u32 pop_stored_mkey(struct mlx5_cache_ent *ent) in pop_stored_mkey() argument
236 ent->stored--; in pop_stored_mkey()
237 ent->reserved--; in pop_stored_mkey()
239 if (ent->stored == ent->reserved) { in pop_stored_mkey()
240 xa_mkey = __xa_erase(&ent->mkeys, ent->stored); in pop_stored_mkey()
245 xa_mkey = __xa_store(&ent->mkeys, ent->stored, XA_ZERO_ENTRY, in pop_stored_mkey()
248 old = __xa_erase(&ent->mkeys, ent->reserved); in pop_stored_mkey()
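
The hits above appear to come from the mlx5 mkey cache: each cache entry keeps its mkeys in an xarray used as a stack with two cursors, stored (ready mkeys) and reserved (slots claimed for in-flight asynchronous creations, see push_mkey_locked). Below is a minimal userspace C sketch of that two-cursor scheme under those assumptions; the fixed-size array, CAP and the helper names are illustrative stand-ins for the kernel xarray, not the driver's API.

/* Minimal model of the stored/reserved two-cursor scheme visible in
 * push_to_reserved()/pop_stored_mkey(); a plain array replaces the xarray. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAP 8

struct mkey_stack {
	uint32_t slot[CAP];
	unsigned int stored;   /* slots [0, stored) hold ready mkeys        */
	unsigned int reserved; /* slots [stored, reserved) await completion */
};

/* Claim one slot for an mkey whose creation was just issued asynchronously. */
static int reserve_slot(struct mkey_stack *s)
{
	if (s->reserved == CAP)
		return -1;
	s->reserved++;
	return 0;
}

/* Asynchronous creation completed: park the new mkey in the stored region. */
static void push_to_reserved(struct mkey_stack *s, uint32_t mkey)
{
	assert(s->stored < s->reserved);
	s->slot[s->stored++] = mkey;
}

/* Hand out the most recently stored mkey; both regions shrink by one. */
static uint32_t pop_stored(struct mkey_stack *s)
{
	assert(s->stored > 0);
	s->stored--;
	s->reserved--;
	/* The kernel version leaves a placeholder (XA_ZERO_ENTRY) at the freed
	 * stored index while creations are still pending, so the xarray keeps
	 * one entry per reserved slot without allocating memory here. */
	return s->slot[s->stored];
}

int main(void)
{
	struct mkey_stack s = { 0 };
	uint32_t mkey;

	reserve_slot(&s);              /* async creation issued            */
	push_to_reserved(&s, 0x100);   /* completion callback fired        */
	mkey = pop_stored(&s);         /* a consumer takes the cached mkey */
	printf("popped 0x%x, stored=%u reserved=%u\n",
	       (unsigned)mkey, s.stored, s.reserved);
	return 0;
}

Keeping one index space for both regions is what lets the completion callback fill a pre-reserved slot without allocating under the lock, and lets an error path simply back the reservation out (undo_push_reserve_mkey).
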
257 struct mlx5_cache_ent *ent = mkey_out->ent; in create_mkey_callback() local
258 struct mlx5_ib_dev *dev = ent->dev; in create_mkey_callback()
264 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
265 undo_push_reserve_mkey(ent); in create_mkey_callback()
267 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
276 xa_lock_irqsave(&ent->mkeys, flags); in create_mkey_callback()
277 push_to_reserved(ent, mkey_out->mkey); in create_mkey_callback()
279 queue_adjust_cache_locked(ent); in create_mkey_callback()
280 xa_unlock_irqrestore(&ent->mkeys, flags); in create_mkey_callback()
303 static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc) in set_cache_mkc() argument
305 set_mkc_access_pd_addr_fields(mkc, ent->rb_key.access_flags, 0, in set_cache_mkc()
306 ent->dev->umrc.pd); in set_cache_mkc()
309 MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3); in set_cache_mkc()
311 (ent->rb_key.access_mode >> 2) & 0x7); in set_cache_mkc()
312 MLX5_SET(mkc, mkc, ma_translation_mode, !!ent->rb_key.ats); in set_cache_mkc()
315 get_mkc_octo_size(ent->rb_key.access_mode, in set_cache_mkc()
316 ent->rb_key.ndescs)); in set_cache_mkc()
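
set_cache_mkc() above splits the entry's access mode across two mkey-context fields: the low two bits go into access_mode_1_0 and bits 4..2 into access_mode_4_2. A tiny illustration of just that bit split (plain C; the function name and example value are made up for the sketch):

#include <stdint.h>
#include <stdio.h>

/* Split a five-bit access mode into the two mkc fields populated above. */
static void split_access_mode(uint8_t mode, uint8_t *lo_1_0, uint8_t *hi_4_2)
{
	*lo_1_0 = mode & 0x3;        /* -> access_mode_1_0 */
	*hi_4_2 = (mode >> 2) & 0x7; /* -> access_mode_4_2 */
}

int main(void)
{
	uint8_t lo, hi;

	split_access_mode(0x5 /* arbitrary example value */, &lo, &hi);
	printf("access_mode_1_0=%u access_mode_4_2=%u\n",
	       (unsigned)lo, (unsigned)hi);
	return 0;
}
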
321 static int add_keys(struct mlx5_cache_ent *ent, unsigned int num) in add_keys() argument
335 set_cache_mkc(ent, mkc); in add_keys()
336 async_create->ent = ent; in add_keys()
338 err = push_mkey(ent, true, NULL); in add_keys()
344 mlx5_ib_warn(ent->dev, "create mkey failed %d\n", err); in add_keys()
352 xa_lock_irq(&ent->mkeys); in add_keys()
353 undo_push_reserve_mkey(ent); in add_keys()
354 xa_unlock_irq(&ent->mkeys); in add_keys()
361 static int create_cache_mkey(struct mlx5_cache_ent *ent, u32 *mkey) in create_cache_mkey() argument
372 set_cache_mkc(ent, mkc); in create_cache_mkey()
374 err = mlx5_core_create_mkey(ent->dev->mdev, mkey, in, inlen); in create_cache_mkey()
378 WRITE_ONCE(ent->dev->cache.last_add, jiffies); in create_cache_mkey()
384 static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) in remove_cache_mr_locked() argument
388 lockdep_assert_held(&ent->mkeys.xa_lock); in remove_cache_mr_locked()
389 if (!ent->stored) in remove_cache_mr_locked()
391 mkey = pop_stored_mkey(ent); in remove_cache_mr_locked()
392 xa_unlock_irq(&ent->mkeys); in remove_cache_mr_locked()
393 mlx5_core_destroy_mkey(ent->dev->mdev, mkey); in remove_cache_mr_locked()
394 xa_lock_irq(&ent->mkeys); in remove_cache_mr_locked()
397 static int resize_available_mrs(struct mlx5_cache_ent *ent, unsigned int target, in resize_available_mrs() argument
399 __acquires(&ent->mkeys) __releases(&ent->mkeys) in resize_available_mrs()
403 lockdep_assert_held(&ent->mkeys.xa_lock); in resize_available_mrs()
407 target = ent->limit * 2; in resize_available_mrs()
408 if (target == ent->reserved) in resize_available_mrs()
410 if (target > ent->reserved) { in resize_available_mrs()
411 u32 todo = target - ent->reserved; in resize_available_mrs()
413 xa_unlock_irq(&ent->mkeys); in resize_available_mrs()
414 err = add_keys(ent, todo); in resize_available_mrs()
417 xa_lock_irq(&ent->mkeys); in resize_available_mrs()
424 remove_cache_mr_locked(ent); in resize_available_mrs()
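
resize_available_mrs() steers an entry toward a target population: with a target of 0 it aims for 2 * limit, batches additions through add_keys(), removes surplus mkeys one at a time, and drops the xarray lock around the blocking calls. The single-threaded sketch below models only the sizing decision; the struct, the stubbed add/remove helpers, and the assumption that every asynchronous creation succeeds are illustrative.

#include <stdio.h>

struct ent_model {
	unsigned int reserved; /* stored + in-flight mkeys                 */
	unsigned int limit;    /* low watermark; high watermark is 2*limit */
};

static int add_keys_stub(struct ent_model *e, unsigned int n)
{
	e->reserved += n; /* pretend every asynchronous creation succeeds */
	return 0;
}

static void remove_one_stub(struct ent_model *e)
{
	e->reserved--; /* destroy one cached mkey */
}

/* Mirror of the sizing loop: a target of 0 means "resize to 2 * limit". */
static int resize_available(struct ent_model *e, unsigned int target)
{
	if (!target)
		target = e->limit * 2;
	while (e->reserved != target) {
		if (target > e->reserved) {
			if (add_keys_stub(e, target - e->reserved))
				return -1;
		} else {
			remove_one_stub(e);
		}
	}
	return 0;
}

int main(void)
{
	struct ent_model e = { .reserved = 1, .limit = 4 };

	resize_available(&e, 0);
	printf("reserved after resize: %u\n", e.reserved); /* 8 */
	return 0;
}
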
432 struct mlx5_cache_ent *ent = filp->private_data; in size_write() local
445 xa_lock_irq(&ent->mkeys); in size_write()
446 if (target < ent->in_use) { in size_write()
450 target = target - ent->in_use; in size_write()
451 if (target < ent->limit || target > ent->limit*2) { in size_write()
455 err = resize_available_mrs(ent, target, false); in size_write()
458 xa_unlock_irq(&ent->mkeys); in size_write()
463 xa_unlock_irq(&ent->mkeys); in size_write()
470 struct mlx5_cache_ent *ent = filp->private_data; in size_read() local
474 err = snprintf(lbuf, sizeof(lbuf), "%ld\n", ent->stored + ent->in_use); in size_read()
491 struct mlx5_cache_ent *ent = filp->private_data; in limit_write() local
503 xa_lock_irq(&ent->mkeys); in limit_write()
504 ent->limit = var; in limit_write()
505 err = resize_available_mrs(ent, 0, true); in limit_write()
506 xa_unlock_irq(&ent->mkeys); in limit_write()
515 struct mlx5_cache_ent *ent = filp->private_data; in limit_read() local
519 err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit); in limit_read()
535 struct mlx5_cache_ent *ent; in someone_adding() local
541 ent = rb_entry(node, struct mlx5_cache_ent, node); in someone_adding()
542 xa_lock_irq(&ent->mkeys); in someone_adding()
543 ret = ent->stored < ent->limit; in someone_adding()
544 xa_unlock_irq(&ent->mkeys); in someone_adding()
559 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent) in queue_adjust_cache_locked() argument
561 lockdep_assert_held(&ent->mkeys.xa_lock); in queue_adjust_cache_locked()
563 if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp) in queue_adjust_cache_locked()
565 if (ent->stored < ent->limit) { in queue_adjust_cache_locked()
566 ent->fill_to_high_water = true; in queue_adjust_cache_locked()
567 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
568 } else if (ent->fill_to_high_water && in queue_adjust_cache_locked()
569 ent->reserved < 2 * ent->limit) { in queue_adjust_cache_locked()
574 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
575 } else if (ent->stored == 2 * ent->limit) { in queue_adjust_cache_locked()
576 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
577 } else if (ent->stored > 2 * ent->limit) { in queue_adjust_cache_locked()
579 ent->fill_to_high_water = false; in queue_adjust_cache_locked()
580 if (ent->stored != ent->reserved) in queue_adjust_cache_locked()
581 queue_delayed_work(ent->dev->cache.wq, &ent->dwork, in queue_adjust_cache_locked()
584 mod_delayed_work(ent->dev->cache.wq, &ent->dwork, 0); in queue_adjust_cache_locked()
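
queue_adjust_cache_locked() is a watermark scheme with hysteresis: once stored drops below limit the entry refills toward a high-water mark of 2 * limit, and it only starts shrinking after stored exceeds that mark, delaying the shrink while completions are still outstanding. A compact model of that decision under those assumptions (the enum and struct names are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

enum cache_work { WORK_NONE, WORK_NOW, WORK_DELAYED };

struct ent_wm {
	unsigned int stored;
	unsigned int reserved;
	unsigned int limit;            /* low watermark; high is 2 * limit */
	bool fill_to_high_water;
	bool disabled;
};

/* Decide how the delayed work should be (re)scheduled, mirroring the
 * watermark hysteresis in queue_adjust_cache_locked(). */
static enum cache_work adjust(struct ent_wm *e)
{
	if (e->disabled)
		return WORK_NONE;
	if (e->stored < e->limit) {
		e->fill_to_high_water = true;
		return WORK_NOW;
	}
	if (e->fill_to_high_water && e->reserved < 2 * e->limit)
		return WORK_NOW;                /* keep refilling to the high-water mark */
	if (e->stored == 2 * e->limit) {
		e->fill_to_high_water = false;  /* exactly full: nothing to do */
		return WORK_NONE;
	}
	if (e->stored > 2 * e->limit) {
		e->fill_to_high_water = false;
		/* surplus mkeys: shrink lazily while completions are pending */
		return e->stored != e->reserved ? WORK_DELAYED : WORK_NOW;
	}
	return WORK_NONE;
}

int main(void)
{
	struct ent_wm e = { .stored = 2, .reserved = 2, .limit = 4 };

	printf("action=%d\n", adjust(&e)); /* 1 == WORK_NOW: below the low watermark */
	return 0;
}
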
588 static void __cache_work_func(struct mlx5_cache_ent *ent) in __cache_work_func() argument
590 struct mlx5_ib_dev *dev = ent->dev; in __cache_work_func()
594 xa_lock_irq(&ent->mkeys); in __cache_work_func()
595 if (ent->disabled) in __cache_work_func()
598 if (ent->fill_to_high_water && ent->reserved < 2 * ent->limit && in __cache_work_func()
600 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
601 err = add_keys(ent, 1); in __cache_work_func()
602 xa_lock_irq(&ent->mkeys); in __cache_work_func()
603 if (ent->disabled) in __cache_work_func()
616 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func()
620 } else if (ent->stored > 2 * ent->limit) { in __cache_work_func()
635 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
639 xa_lock_irq(&ent->mkeys); in __cache_work_func()
640 if (ent->disabled) in __cache_work_func()
643 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func()
646 remove_cache_mr_locked(ent); in __cache_work_func()
647 queue_adjust_cache_locked(ent); in __cache_work_func()
650 xa_unlock_irq(&ent->mkeys); in __cache_work_func()
655 struct mlx5_cache_ent *ent; in delayed_cache_work_func() local
657 ent = container_of(work, struct mlx5_cache_ent, dwork.work); in delayed_cache_work_func()
658 __cache_work_func(ent); in delayed_cache_work_func()
687 struct mlx5_cache_ent *ent) in mlx5_cache_ent_insert() argument
697 cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key); in mlx5_cache_ent_insert()
707 rb_link_node(&ent->node, parent, new); in mlx5_cache_ent_insert()
708 rb_insert_color(&ent->node, &cache->rb_root); in mlx5_cache_ent_insert()
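
mlx5_cache_ent_insert() descends the cache's rb-tree comparing rb_keys and links the new entry at the leaf it reaches. The sketch below shows the same descend-and-link pattern with an ordinary unbalanced BST and a two-field key; the real rb_key also carries access_flags and ats, the comparison order here is assumed, and rb_link_node()/rb_insert_color() supply the rebalancing this model omits.

#include <stdio.h>

/* Simplified cache key and a plain BST standing in for the kernel rb-tree. */
struct cache_key {
	unsigned int access_mode;
	unsigned int ndescs;
};

struct cache_node {
	struct cache_key key;
	struct cache_node *left, *right;
};

static int key_cmp(struct cache_key a, struct cache_key b)
{
	if (a.access_mode != b.access_mode)
		return a.access_mode < b.access_mode ? -1 : 1;
	if (a.ndescs != b.ndescs)
		return a.ndescs < b.ndescs ? -1 : 1;
	return 0;
}

/* Descend from the root, remember the parent link, attach the new node there;
 * this mirrors the new/parent walk in mlx5_cache_ent_insert(). */
static int insert(struct cache_node **root, struct cache_node *ent)
{
	struct cache_node **new = root;

	while (*new) {
		int cmp = key_cmp((*new)->key, ent->key);

		if (cmp > 0)
			new = &(*new)->left;
		else if (cmp < 0)
			new = &(*new)->right;
		else
			return -1; /* duplicate key: entry already cached */
	}
	*new = ent;
	return 0;
}

int main(void)
{
	struct cache_node *root = NULL;
	struct cache_node a = { .key = { .access_mode = 1, .ndescs = 8 } };
	struct cache_node b = { .key = { .access_mode = 1, .ndescs = 16 } };

	printf("%d %d\n", insert(&root, &a), insert(&root, &b)); /* 0 0 */
	return 0;
}
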
755 struct mlx5_cache_ent *ent, in _mlx5_mr_cache_alloc() argument
765 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
766 ent->in_use++; in _mlx5_mr_cache_alloc()
768 if (!ent->stored) { in _mlx5_mr_cache_alloc()
769 queue_adjust_cache_locked(ent); in _mlx5_mr_cache_alloc()
770 ent->miss++; in _mlx5_mr_cache_alloc()
771 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
772 err = create_cache_mkey(ent, &mr->mmkey.key); in _mlx5_mr_cache_alloc()
774 xa_lock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
775 ent->in_use--; in _mlx5_mr_cache_alloc()
776 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
781 mr->mmkey.key = pop_stored_mkey(ent); in _mlx5_mr_cache_alloc()
782 queue_adjust_cache_locked(ent); in _mlx5_mr_cache_alloc()
783 xa_unlock_irq(&ent->mkeys); in _mlx5_mr_cache_alloc()
785 mr->mmkey.cache_ent = ent; in _mlx5_mr_cache_alloc()
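
_mlx5_mr_cache_alloc() prefers a cached mkey and, when the entry is empty, counts a miss and falls back to creating one synchronously, backing out the in_use count on failure. A simplified, lock-free userspace model of that flow (the kernel pop also adjusts reserved and re-queues the fill work, which this sketch skips; all names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct ent_alloc {
	uint32_t stack[8];
	unsigned int stored;
	unsigned int in_use;
	unsigned int miss;
};

/* Stand-in for create_cache_mkey(): a synchronous firmware call in the kernel. */
static int create_mkey_sync(uint32_t *mkey)
{
	static uint32_t next = 0x200;

	*mkey = next++;
	return 0;
}

/* Mirror of the allocation path: prefer a cached mkey, otherwise count a
 * miss and create one synchronously, undoing in_use on failure. */
static int cache_alloc(struct ent_alloc *e, uint32_t *mkey)
{
	e->in_use++;
	if (!e->stored) {
		e->miss++;
		if (create_mkey_sync(mkey)) {
			e->in_use--;
			return -1;
		}
		return 0;
	}
	*mkey = e->stack[--e->stored];
	return 0;
}

int main(void)
{
	struct ent_alloc e = { .stack = { 0x100 }, .stored = 1 };
	uint32_t k;

	cache_alloc(&e, &k);
	printf("hit:  0x%x (miss=%u)\n", (unsigned)k, e.miss);
	cache_alloc(&e, &k);
	printf("miss: 0x%x (miss=%u)\n", (unsigned)k, e.miss);
	return 0;
}
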
824 struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key); in mlx5_mr_cache_alloc() local
826 if (!ent) in mlx5_mr_cache_alloc()
829 return _mlx5_mr_cache_alloc(dev, ent, access_flags); in mlx5_mr_cache_alloc()
832 static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent) in clean_keys() argument
836 cancel_delayed_work(&ent->dwork); in clean_keys()
837 xa_lock_irq(&ent->mkeys); in clean_keys()
838 while (ent->stored) { in clean_keys()
839 mkey = pop_stored_mkey(ent); in clean_keys()
840 xa_unlock_irq(&ent->mkeys); in clean_keys()
842 xa_lock_irq(&ent->mkeys); in clean_keys()
844 xa_unlock_irq(&ent->mkeys); in clean_keys()
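
clean_keys() drains an entry with the usual lock dance: pop one mkey under the xarray lock, drop the lock for the blocking mlx5_core_destroy_mkey() call, then re-take it for the next iteration. A userspace model of the same pattern with a pthread mutex standing in for the xarray lock (the fixed stack and names are illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t stack[4] = { 0x100, 0x101, 0x102 };
static unsigned int stored = 3;

/* Stand-in for mlx5_core_destroy_mkey(): may sleep, so it must be called
 * without the entry lock held. */
static void destroy_mkey(uint32_t mkey)
{
	printf("destroying 0x%x\n", (unsigned)mkey);
}

/* Mirror of the clean_keys() drain loop: pop under the lock, drop the lock
 * around the blocking destroy, then re-take it for the next iteration. */
static void drain(void)
{
	pthread_mutex_lock(&lock);
	while (stored) {
		uint32_t mkey = stack[--stored];

		pthread_mutex_unlock(&lock);
		destroy_mkey(mkey);
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	drain();
	return 0;
}
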
857 struct mlx5_cache_ent *ent) in mlx5_mkey_cache_debugfs_add_ent() argument
859 int order = order_base_2(ent->rb_key.ndescs); in mlx5_mkey_cache_debugfs_add_ent()
865 if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM) in mlx5_mkey_cache_debugfs_add_ent()
868 sprintf(ent->name, "%d", order); in mlx5_mkey_cache_debugfs_add_ent()
869 dir = debugfs_create_dir(ent->name, dev->cache.fs_root); in mlx5_mkey_cache_debugfs_add_ent()
870 debugfs_create_file("size", 0600, dir, ent, &size_fops); in mlx5_mkey_cache_debugfs_add_ent()
871 debugfs_create_file("limit", 0600, dir, ent, &limit_fops); in mlx5_mkey_cache_debugfs_add_ent()
872 debugfs_create_ulong("cur", 0400, dir, &ent->stored); in mlx5_mkey_cache_debugfs_add_ent()
873 debugfs_create_u32("miss", 0600, dir, &ent->miss); in mlx5_mkey_cache_debugfs_add_ent()
899 struct mlx5_cache_ent *ent; in mlx5r_cache_create_ent_locked() local
903 ent = kzalloc(sizeof(*ent), GFP_KERNEL); in mlx5r_cache_create_ent_locked()
904 if (!ent) in mlx5r_cache_create_ent_locked()
907 xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ); in mlx5r_cache_create_ent_locked()
908 ent->rb_key = rb_key; in mlx5r_cache_create_ent_locked()
909 ent->dev = dev; in mlx5r_cache_create_ent_locked()
910 ent->is_tmp = !persistent_entry; in mlx5r_cache_create_ent_locked()
912 INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); in mlx5r_cache_create_ent_locked()
914 ret = mlx5_cache_ent_insert(&dev->cache, ent); in mlx5r_cache_create_ent_locked()
916 kfree(ent); in mlx5r_cache_create_ent_locked()
929 ent->limit = dev->mdev->profile.mr_cache[order].limit; in mlx5r_cache_create_ent_locked()
931 ent->limit = 0; in mlx5r_cache_create_ent_locked()
933 mlx5_mkey_cache_debugfs_add_ent(dev, ent); in mlx5r_cache_create_ent_locked()
935 mod_delayed_work(ent->dev->cache.wq, in mlx5r_cache_create_ent_locked()
936 &ent->dev->cache.remove_ent_dwork, in mlx5r_cache_create_ent_locked()
940 return ent; in mlx5r_cache_create_ent_locked()
946 struct mlx5_cache_ent *ent; in remove_ent_work_func() local
954 ent = rb_entry(cur, struct mlx5_cache_ent, node); in remove_ent_work_func()
958 xa_lock_irq(&ent->mkeys); in remove_ent_work_func()
959 if (!ent->is_tmp) { in remove_ent_work_func()
960 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
964 xa_unlock_irq(&ent->mkeys); in remove_ent_work_func()
966 clean_keys(ent->dev, ent); in remove_ent_work_func()
979 struct mlx5_cache_ent *ent; in mlx5_mkey_cache_init() local
1000 ent = mlx5r_cache_create_ent_locked(dev, rb_key, true); in mlx5_mkey_cache_init()
1001 if (IS_ERR(ent)) { in mlx5_mkey_cache_init()
1002 ret = PTR_ERR(ent); in mlx5_mkey_cache_init()
1013 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_init()
1014 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1015 queue_adjust_cache_locked(ent); in mlx5_mkey_cache_init()
1016 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_init()
1031 struct mlx5_cache_ent *ent; in mlx5_mkey_cache_cleanup() local
1040 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_cleanup()
1041 xa_lock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1042 ent->disabled = true; in mlx5_mkey_cache_cleanup()
1043 xa_unlock_irq(&ent->mkeys); in mlx5_mkey_cache_cleanup()
1044 cancel_delayed_work(&ent->dwork); in mlx5_mkey_cache_cleanup()
1061 ent = rb_entry(node, struct mlx5_cache_ent, node); in mlx5_mkey_cache_cleanup()
1063 clean_keys(dev, ent); in mlx5_mkey_cache_cleanup()
1064 rb_erase(&ent->node, root); in mlx5_mkey_cache_cleanup()
1065 kfree(ent); in mlx5_mkey_cache_cleanup()
1168 struct mlx5_cache_ent *ent; in alloc_cacheable_mr() local
1183 ent = mkey_cache_ent_from_rb_key(dev, rb_key); in alloc_cacheable_mr()
1188 if (!ent) { in alloc_cacheable_mr()
1198 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags); in alloc_cacheable_mr()
1834 struct mlx5_cache_ent *ent; in cache_ent_find_and_store() local
1844 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key); in cache_ent_find_and_store()
1845 if (ent) { in cache_ent_find_and_store()
1846 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) { in cache_ent_find_and_store()
1847 if (ent->disabled) { in cache_ent_find_and_store()
1851 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
1858 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false); in cache_ent_find_and_store()
1860 if (IS_ERR(ent)) in cache_ent_find_and_store()
1861 return PTR_ERR(ent); in cache_ent_find_and_store()
1863 mr->mmkey.cache_ent = ent; in cache_ent_find_and_store()
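
cache_ent_find_and_store() reuses an existing cache entry whose key matches the mkey being released and, failing that, creates a temporary (is_tmp) entry so the mkey can still be parked in the cache. A find-or-create sketch of that flow; the linear exact-match lookup glosses over the kernel's rb-tree walk and exact ndescs check, and all names and sizes here are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct cache_entry {
	unsigned int ndescs;
	bool disabled;
	bool is_tmp;
};

#define MAX_ENTRIES 8
static struct cache_entry *entries[MAX_ENTRIES];
static unsigned int nr_entries;

/* Linear lookup standing in for the rb-tree walk keyed on rb_key. */
static struct cache_entry *find_entry(unsigned int ndescs)
{
	for (unsigned int i = 0; i < nr_entries; i++)
		if (entries[i]->ndescs == ndescs)
			return entries[i];
	return NULL;
}

/* Mirror of the find-or-create flow: reuse a live matching entry, otherwise
 * register a temporary one so the mkey can still be cached. */
static struct cache_entry *find_or_create(unsigned int ndescs)
{
	struct cache_entry *ent = find_entry(ndescs);

	if (ent && !ent->disabled)
		return ent;

	ent = calloc(1, sizeof(*ent));
	if (!ent || nr_entries == MAX_ENTRIES) {
		free(ent);
		return NULL;
	}
	ent->ndescs = ndescs;
	ent->is_tmp = true; /* non-persistent, reaped later by remove_ent work */
	entries[nr_entries++] = ent;
	return ent;
}

int main(void)
{
	struct cache_entry *a = find_or_create(16);
	struct cache_entry *b = find_or_create(16);

	printf("same entry: %s, tmp: %s\n",
	       a == b ? "yes" : "no", a->is_tmp ? "yes" : "no");
	return 0;
}
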