Lines Matching refs:table

35 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; in mlx5_sf_dev_allocated() local
37 return table && !xa_empty(&table->devices); in mlx5_sf_dev_allocated()
85 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; in mlx5_sf_dev_add() local
113 if (!table->max_sfs) { in mlx5_sf_dev_add()
119 sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length); in mlx5_sf_dev_add()
136 err = xa_insert(&table->devices, sf_index, sf_dev, GFP_KERNEL); in mlx5_sf_dev_add()
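The reference at line 119 shows how each SF's MMIO window is carved out of the parent PCI BAR: the window size comes from the log_min_sf_size capability (line 317, further down) and the base from BAR 2 (line 318). Below is a minimal, self-contained sketch of that arithmetic; the capability value and the BAR start address are made-up numbers, only the formula comes from the listing.

    /* Per-SF BAR offset math implied by lines 119, 317 and 318. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int log_min_sf_size = 8;                         /* assumed MLX5_CAP_GEN(dev, log_min_sf_size) value */
        uint64_t sf_bar_length = 1ULL << (log_min_sf_size + 12);  /* minimum SF size in 4 KiB pages -> 1 MiB here */
        uint64_t base_address = 0xe0000000ULL;                    /* assumed pci_resource_start(dev->pdev, 2) */
        unsigned int sf_index = 3;
        uint64_t bar_base_addr = base_address + (uint64_t)sf_index * sf_bar_length;

        printf("SF %u BAR window: 0x%llx, length 0x%llx\n", sf_index,
               (unsigned long long)bar_base_addr, (unsigned long long)sf_bar_length);
        return 0;
    }

So with a 1 MiB per-SF window, SF index 3 sits 3 MiB above the start of BAR 2.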
150 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; in mlx5_sf_dev_del() local
152 xa_erase(&table->devices, sf_index); in mlx5_sf_dev_del()
159 struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb); in mlx5_sf_dev_state_change_handler() local
166 max_functions = mlx5_sf_max_functions(table->dev); in mlx5_sf_dev_state_change_handler()
170 base_id = mlx5_sf_start_function_id(table->dev); in mlx5_sf_dev_state_change_handler()
175 mutex_lock(&table->table_lock); in mlx5_sf_dev_state_change_handler()
176 sf_dev = xa_load(&table->devices, sf_index); in mlx5_sf_dev_state_change_handler()
181 mlx5_sf_dev_del(table->dev, sf_dev, sf_index); in mlx5_sf_dev_state_change_handler()
185 mlx5_sf_dev_del(table->dev, sf_dev, sf_index); in mlx5_sf_dev_state_change_handler()
187 mlx5_core_err(table->dev, in mlx5_sf_dev_state_change_handler()
193 mlx5_sf_dev_add(table->dev, sf_index, event->function_id, in mlx5_sf_dev_state_change_handler()
199 mutex_unlock(&table->table_lock); in mlx5_sf_dev_state_change_handler()
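Lines 159-199 show the event handler recovering the table from its notifier_block and making every add/del decision under table_lock after an xa_load() of the SF index. A condensed sketch of that pattern follows; the sf_index computation, the new_vhca_state field, the state constants and the sw_function_id argument are assumptions about the mlx5 vhca event API, while the lock, the lookup and the add/del/err calls are taken from the listing.

    /* Lookup-under-lock shape of mlx5_sf_dev_state_change_handler(), condensed. */
    struct mlx5_sf_dev_table *table = container_of(nb, struct mlx5_sf_dev_table, nb);  /* line 159 */
    struct mlx5_sf_dev *sf_dev;
    u16 sf_index = event->function_id - mlx5_sf_start_function_id(table->dev);  /* base id from line 170; offset assumed */

    mutex_lock(&table->table_lock);                  /* line 175 */
    sf_dev = xa_load(&table->devices, sf_index);     /* line 176 */
    switch (event->new_vhca_state) {                 /* field name assumed */
    case MLX5_VHCA_STATE_TEARDOWN_REQUEST:           /* constant assumed */
            if (sf_dev)
                    mlx5_sf_dev_del(table->dev, sf_dev, sf_index);    /* lines 181/185 */
            else
                    mlx5_core_err(table->dev, "teardown for missing SF index=%d\n", sf_index);  /* line 187 */
            break;
    case MLX5_VHCA_STATE_ACTIVE:                     /* constant assumed */
            if (!sf_dev)
                    mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
                                    event->sw_function_id);           /* line 193; last argument assumed */
            break;
    default:
            break;
    }
    mutex_unlock(&table->table_lock);                /* line 199 */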
203 static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table) in mlx5_sf_dev_vhca_arm_all() argument
205 struct mlx5_core_dev *dev = table->dev; in mlx5_sf_dev_vhca_arm_all()
226 struct mlx5_sf_dev_table *table = container_of(work, struct mlx5_sf_dev_table, work); in mlx5_sf_dev_add_active_work() local
228 struct mlx5_core_dev *dev = table->dev; in mlx5_sf_dev_add_active_work()
239 if (table->stop_active_wq) in mlx5_sf_dev_add_active_work()
252 mutex_lock(&table->table_lock); in mlx5_sf_dev_add_active_work()
254 if (!xa_load(&table->devices, i)) in mlx5_sf_dev_add_active_work()
267 mutex_unlock(&table->table_lock); in mlx5_sf_dev_add_active_work()
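Lines 226-267 are the deferred scan for SFs that were already active before the notifier was registered. The references show two guards: the stop_active_wq flag lets table teardown abort a scan in progress, and the xa_load() under table_lock avoids re-adding an index the event handler has already populated. Roughly, with the function-id bookkeeping and the VHCA state query elided because they are not part of this listing:

    /* Scan loop shape implied by lines 239 and 252-267. */
    for (i = 0; i < max_functions; i++, function_id++) {
            if (table->stop_active_wq)               /* table is being torn down; stop early */
                    return;

            /* (query the VHCA state of function_id here and skip inactive ones;
             * that command is not part of this listing) */

            mutex_lock(&table->table_lock);
            if (!xa_load(&table->devices, i))        /* not already added by the event handler */
                    mlx5_sf_dev_add(dev, i, function_id, sw_function_id);   /* arguments assumed */
            mutex_unlock(&table->table_lock);
    }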
272 static int mlx5_sf_dev_queue_active_work(struct mlx5_sf_dev_table *table) in mlx5_sf_dev_queue_active_work() argument
274 if (MLX5_CAP_GEN(table->dev, eswitch_manager)) in mlx5_sf_dev_queue_active_work()
280 table->active_wq = create_singlethread_workqueue("mlx5_active_sf"); in mlx5_sf_dev_queue_active_work()
281 if (!table->active_wq) in mlx5_sf_dev_queue_active_work()
283 INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work); in mlx5_sf_dev_queue_active_work()
284 queue_work(table->active_wq, &table->work); in mlx5_sf_dev_queue_active_work()
288 static void mlx5_sf_dev_destroy_active_work(struct mlx5_sf_dev_table *table) in mlx5_sf_dev_destroy_active_work() argument
290 if (table->active_wq) { in mlx5_sf_dev_destroy_active_work()
291 table->stop_active_wq = true; in mlx5_sf_dev_destroy_active_work()
292 destroy_workqueue(table->active_wq); in mlx5_sf_dev_destroy_active_work()
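Lines 272-292 give the scan work its own single-threaded workqueue and the matching teardown. The ordering at lines 291-292 matters: the stop flag is raised first so a scan that is still running bails out at the check on line 239, and destroy_workqueue() then waits for the work item to finish before freeing the queue. A small sketch of that lifecycle, error handling trimmed:

    /* Workqueue lifecycle implied by lines 280-284 and 290-292. */
    table->active_wq = create_singlethread_workqueue("mlx5_active_sf");
    if (!table->active_wq)
            return -ENOMEM;
    INIT_WORK(&table->work, &mlx5_sf_dev_add_active_work);
    queue_work(table->active_wq, &table->work);

    /* ... and on teardown: */
    table->stop_active_wq = true;            /* observed by the running scan (line 239) */
    destroy_workqueue(table->active_wq);     /* flushes the work item, then frees the queue */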
298 struct mlx5_sf_dev_table *table; in mlx5_sf_dev_table_create() local
305 table = kzalloc(sizeof(*table), GFP_KERNEL); in mlx5_sf_dev_table_create()
306 if (!table) { in mlx5_sf_dev_table_create()
311 table->nb.notifier_call = mlx5_sf_dev_state_change_handler; in mlx5_sf_dev_table_create()
312 table->dev = dev; in mlx5_sf_dev_table_create()
317 table->sf_bar_length = 1 << (MLX5_CAP_GEN(dev, log_min_sf_size) + 12); in mlx5_sf_dev_table_create()
318 table->base_address = pci_resource_start(dev->pdev, 2); in mlx5_sf_dev_table_create()
319 table->max_sfs = max_sfs; in mlx5_sf_dev_table_create()
320 xa_init(&table->devices); in mlx5_sf_dev_table_create()
321 mutex_init(&table->table_lock); in mlx5_sf_dev_table_create()
322 dev->priv.sf_dev_table = table; in mlx5_sf_dev_table_create()
324 err = mlx5_vhca_event_notifier_register(dev, &table->nb); in mlx5_sf_dev_table_create()
328 err = mlx5_sf_dev_queue_active_work(table); in mlx5_sf_dev_table_create()
332 err = mlx5_sf_dev_vhca_arm_all(table); in mlx5_sf_dev_table_create()
339 mlx5_sf_dev_destroy_active_work(table); in mlx5_sf_dev_table_create()
341 mlx5_vhca_event_notifier_unregister(dev, &table->nb); in mlx5_sf_dev_table_create()
343 table->max_sfs = 0; in mlx5_sf_dev_table_create()
344 kfree(table); in mlx5_sf_dev_table_create()
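Lines 298-344 show the construction order (allocate and populate the table, publish it in dev->priv.sf_dev_table, register the vhca event notifier, queue the active-SF scan, arm vhca events for all SFs) and an error unwind that mirrors it in reverse. A sketch of that unwind; the goto label names are assumptions, only the call order comes from the listing.

    /* Error unwind implied by lines 339-344, mirroring the setup order. */
    arm_err:
            mlx5_sf_dev_destroy_active_work(table);                 /* undo mlx5_sf_dev_queue_active_work() */
    active_err:
            mlx5_vhca_event_notifier_unregister(dev, &table->nb);   /* undo the notifier registration */
    vhca_err:
            table->max_sfs = 0;      /* consistent with the !table->max_sfs guard at line 113 */
            kfree(table);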
350 static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table) in mlx5_sf_dev_destroy_all() argument
355 xa_for_each(&table->devices, index, sf_dev) { in mlx5_sf_dev_destroy_all()
356 xa_erase(&table->devices, index); in mlx5_sf_dev_destroy_all()
357 mlx5_sf_dev_remove(table->dev, sf_dev); in mlx5_sf_dev_destroy_all()
363 struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table; in mlx5_sf_dev_table_destroy() local
365 if (!table) in mlx5_sf_dev_table_destroy()
368 mlx5_sf_dev_destroy_active_work(table); in mlx5_sf_dev_table_destroy()
369 mlx5_vhca_event_notifier_unregister(dev, &table->nb); in mlx5_sf_dev_table_destroy()
370 mutex_destroy(&table->table_lock); in mlx5_sf_dev_table_destroy()
375 mlx5_sf_dev_destroy_all(table); in mlx5_sf_dev_table_destroy()
377 WARN_ON(!xa_empty(&table->devices)); in mlx5_sf_dev_table_destroy()
378 kfree(table); in mlx5_sf_dev_table_destroy()
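Taken together, the references above pin down most of struct mlx5_sf_dev_table. A reconstruction for orientation only: the field names appear in the listing, but the types, bit widths and field order are inferred, not copied from the mlx5 header.

    /* Inferred layout of struct mlx5_sf_dev_table (types and order assumed). */
    #include <linux/mutex.h>
    #include <linux/notifier.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>
    #include <linux/xarray.h>

    struct mlx5_core_dev;

    struct mlx5_sf_dev_table {
            struct xarray devices;               /* sf_index -> struct mlx5_sf_dev * (lines 136, 176, 254, 355) */
            struct notifier_block nb;            /* vhca state change notifier (lines 159, 311, 324) */
            struct mlx5_core_dev *dev;           /* parent device (lines 205, 228, 312) */
            phys_addr_t base_address;            /* start of BAR 2 (lines 119, 318) */
            u64 sf_bar_length;                   /* per-SF BAR window size (lines 119, 317) */
            u16 max_sfs;                         /* 0 disables SF add (lines 113, 319, 343) */
            struct mutex table_lock;             /* event handler vs. active scan vs. teardown (line 321) */
            struct workqueue_struct *active_wq;  /* one-shot scan of already-active SFs (line 280) */
            struct work_struct work;             /* the scan work item (lines 226, 283) */
            bool stop_active_wq;                 /* set on teardown to abort the scan (lines 239, 291) */
    };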