main.c (bfc5d839184f53cc16d551873f9254f2d4d493be) | main.c (df097a278c7592873d88571a8c78f987f6ae511b) |
---|---|
1/* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: --- 68 unchanged lines hidden (view full) --- 77MODULE_LICENSE("Dual BSD/GPL"); 78 79static char mlx5_version[] = 80 DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" 81 DRIVER_VERSION "\n"; 82 83struct mlx5_ib_event_work { 84 struct work_struct work; | 1/* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: --- 68 unchanged lines hidden (view full) --- 77MODULE_LICENSE("Dual BSD/GPL"); 78 79static char mlx5_version[] = 80 DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v" 81 DRIVER_VERSION "\n"; 82 83struct mlx5_ib_event_work { 84 struct work_struct work; |
85 struct mlx5_core_dev *dev; 86 void *context; | 85 union { 86 struct mlx5_ib_dev *dev; 87 struct mlx5_ib_multiport_info *mpi; 88 }; 89 bool is_slave; |
87 enum mlx5_dev_event event; | 90 enum mlx5_dev_event event; |
88 unsigned long param; | 91 void *param; |
89}; 90 91enum { 92 MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3, 93}; 94 95static struct workqueue_struct *mlx5_ib_event_wq; 96static LIST_HEAD(mlx5_ib_unaffiliated_port_list); --- 3611 unchanged lines hidden (view full) --- 3708} 3709 3710static struct mlx5_ib_flow_handler * 3711_create_raw_flow_rule(struct mlx5_ib_dev *dev, 3712 struct mlx5_ib_flow_prio *ft_prio, 3713 struct mlx5_flow_destination *dst, 3714 struct mlx5_ib_flow_matcher *fs_matcher, 3715 struct mlx5_flow_act *flow_act, | 92}; 93 94enum { 95 MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3, 96}; 97 98static struct workqueue_struct *mlx5_ib_event_wq; 99static LIST_HEAD(mlx5_ib_unaffiliated_port_list); --- 3611 unchanged lines hidden (view full) --- 3711} 3712 3713static struct mlx5_ib_flow_handler * 3714_create_raw_flow_rule(struct mlx5_ib_dev *dev, 3715 struct mlx5_ib_flow_prio *ft_prio, 3716 struct mlx5_flow_destination *dst, 3717 struct mlx5_ib_flow_matcher *fs_matcher, 3718 struct mlx5_flow_act *flow_act, |
3716 void *cmd_in, int inlen, 3717 int dst_num) | 3719 void *cmd_in, int inlen) |
3718{ 3719 struct mlx5_ib_flow_handler *handler; 3720 struct mlx5_flow_spec *spec; 3721 struct mlx5_flow_table *ft = ft_prio->flow_table; 3722 int err = 0; 3723 3724 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 3725 handler = kzalloc(sizeof(*handler), GFP_KERNEL); --- 5 unchanged lines hidden (view full) --- 3731 INIT_LIST_HEAD(&handler->list); 3732 3733 memcpy(spec->match_value, cmd_in, inlen); 3734 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 3735 fs_matcher->mask_len); 3736 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 3737 3738 handler->rule = mlx5_add_flow_rules(ft, spec, | 3720{ 3721 struct mlx5_ib_flow_handler *handler; 3722 struct mlx5_flow_spec *spec; 3723 struct mlx5_flow_table *ft = ft_prio->flow_table; 3724 int err = 0; 3725 3726 spec = kvzalloc(sizeof(*spec), GFP_KERNEL); 3727 handler = kzalloc(sizeof(*handler), GFP_KERNEL); --- 5 unchanged lines hidden (view full) --- 3733 INIT_LIST_HEAD(&handler->list); 3734 3735 memcpy(spec->match_value, cmd_in, inlen); 3736 memcpy(spec->match_criteria, fs_matcher->matcher_mask.match_params, 3737 fs_matcher->mask_len); 3738 spec->match_criteria_enable = fs_matcher->match_criteria_enable; 3739 3740 handler->rule = mlx5_add_flow_rules(ft, spec, |
3739 flow_act, dst, dst_num); | 3741 flow_act, dst, 1); |
3740 3741 if (IS_ERR(handler->rule)) { 3742 err = PTR_ERR(handler->rule); 3743 goto free; 3744 } 3745 3746 ft_prio->refcount++; 3747 handler->prio = ft_prio; --- 46 unchanged lines hidden (view full) --- 3794 3795 return false; 3796} 3797 3798struct mlx5_ib_flow_handler * 3799mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, 3800 struct mlx5_ib_flow_matcher *fs_matcher, 3801 struct mlx5_flow_act *flow_act, | 3742 3743 if (IS_ERR(handler->rule)) { 3744 err = PTR_ERR(handler->rule); 3745 goto free; 3746 } 3747 3748 ft_prio->refcount++; 3749 handler->prio = ft_prio; --- 46 unchanged lines hidden (view full) --- 3796 3797 return false; 3798} 3799 3800struct mlx5_ib_flow_handler * 3801mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev, 3802 struct mlx5_ib_flow_matcher *fs_matcher, 3803 struct mlx5_flow_act *flow_act, |
3802 u32 counter_id, | |
3803 void *cmd_in, int inlen, int dest_id, 3804 int dest_type) 3805{ 3806 struct mlx5_flow_destination *dst; 3807 struct mlx5_ib_flow_prio *ft_prio; 3808 struct mlx5_ib_flow_handler *handler; | 3804 void *cmd_in, int inlen, int dest_id, 3805 int dest_type) 3806{ 3807 struct mlx5_flow_destination *dst; 3808 struct mlx5_ib_flow_prio *ft_prio; 3809 struct mlx5_ib_flow_handler *handler; |
3809 int dst_num = 0; | |
3810 bool mcast; 3811 int err; 3812 3813 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 3814 return ERR_PTR(-EOPNOTSUPP); 3815 3816 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 3817 return ERR_PTR(-ENOMEM); 3818 | 3810 bool mcast; 3811 int err; 3812 3813 if (fs_matcher->flow_type != MLX5_IB_FLOW_TYPE_NORMAL) 3814 return ERR_PTR(-EOPNOTSUPP); 3815 3816 if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO) 3817 return ERR_PTR(-ENOMEM); 3818 |
3819 dst = kzalloc(sizeof(*dst) * 2, GFP_KERNEL); | 3819 dst = kzalloc(sizeof(*dst), GFP_KERNEL); |
3820 if (!dst) 3821 return ERR_PTR(-ENOMEM); 3822 3823 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 3824 mutex_lock(&dev->flow_db->lock); 3825 3826 ft_prio = _get_flow_table(dev, fs_matcher, mcast); 3827 if (IS_ERR(ft_prio)) { 3828 err = PTR_ERR(ft_prio); 3829 goto unlock; 3830 } 3831 3832 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) { | 3820 if (!dst) 3821 return ERR_PTR(-ENOMEM); 3822 3823 mcast = raw_fs_is_multicast(fs_matcher, cmd_in); 3824 mutex_lock(&dev->flow_db->lock); 3825 3826 ft_prio = _get_flow_table(dev, fs_matcher, mcast); 3827 if (IS_ERR(ft_prio)) { 3828 err = PTR_ERR(ft_prio); 3829 goto unlock; 3830 } 3831 3832 if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) { |
3833 dst[dst_num].type = dest_type; 3834 dst[dst_num].tir_num = dest_id; | 3833 dst->type = dest_type; 3834 dst->tir_num = dest_id; |
3835 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3836 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { | 3835 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3836 } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { |
3837 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 3838 dst[dst_num].ft_num = dest_id; | 3837 dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM; 3838 dst->ft_num = dest_id; |
3839 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3840 } else { | 3839 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 3840 } else { |
3841 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT; | 3841 dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT; |
3842 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 3843 } 3844 | 3842 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW; 3843 } 3844 |
3845 dst_num++; 3846 3847 if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { 3848 dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; 3849 dst[dst_num].counter_id = counter_id; 3850 dst_num++; 3851 } 3852 | |
3853 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act, | 3845 handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act, |
3854 cmd_in, inlen, dst_num); | 3846 cmd_in, inlen); |
3855 3856 if (IS_ERR(handler)) { 3857 err = PTR_ERR(handler); 3858 goto destroy_ft; 3859 } 3860 3861 mutex_unlock(&dev->flow_db->lock); 3862 atomic_inc(&fs_matcher->usecnt); --- 383 unchanged lines hidden (view full) --- 4246 4247static void mlx5_ib_handle_event(struct work_struct *_work) 4248{ 4249 struct mlx5_ib_event_work *work = 4250 container_of(_work, struct mlx5_ib_event_work, work); 4251 struct mlx5_ib_dev *ibdev; 4252 struct ib_event ibev; 4253 bool fatal = false; | 3847 3848 if (IS_ERR(handler)) { 3849 err = PTR_ERR(handler); 3850 goto destroy_ft; 3851 } 3852 3853 mutex_unlock(&dev->flow_db->lock); 3854 atomic_inc(&fs_matcher->usecnt); --- 383 unchanged lines hidden (view full) --- 4238 4239static void mlx5_ib_handle_event(struct work_struct *_work) 4240{ 4241 struct mlx5_ib_event_work *work = 4242 container_of(_work, struct mlx5_ib_event_work, work); 4243 struct mlx5_ib_dev *ibdev; 4244 struct ib_event ibev; 4245 bool fatal = false; |
4254 u8 port = (u8)work->param; | 4246 u8 port = (u8)(unsigned long)work->param; |
4255 | 4247 |
4256 if (mlx5_core_is_mp_slave(work->dev)) { 4257 ibdev = mlx5_ib_get_ibdev_from_mpi(work->context); | 4248 if (work->is_slave) { 4249 ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); |
4258 if (!ibdev) 4259 goto out; 4260 } else { | 4250 if (!ibdev) 4251 goto out; 4252 } else { |
4261 ibdev = work->context; | 4253 ibdev = work->dev; |
4262 } 4263 4264 switch (work->event) { 4265 case MLX5_DEV_EVENT_SYS_ERROR: 4266 ibev.event = IB_EVENT_DEVICE_FATAL; 4267 mlx5_ib_handle_internal_error(ibdev); 4268 fatal = true; 4269 break; | 4254 } 4255 4256 switch (work->event) { 4257 case MLX5_DEV_EVENT_SYS_ERROR: 4258 ibev.event = IB_EVENT_DEVICE_FATAL; 4259 mlx5_ib_handle_internal_error(ibdev); 4260 fatal = true; 4261 break; |
4270 | |
4271 case MLX5_DEV_EVENT_PORT_UP: 4272 case MLX5_DEV_EVENT_PORT_DOWN: 4273 case MLX5_DEV_EVENT_PORT_INITIALIZED: 4274 /* In RoCE, port up/down events are handled in 4275 * mlx5_netdev_event(). 4276 */ 4277 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 4278 IB_LINK_LAYER_ETHERNET) --- 38 unchanged lines hidden (view full) --- 4317 ib_dispatch_event(&ibev); 4318 4319 if (fatal) 4320 ibdev->ib_active = false; 4321out: 4322 kfree(work); 4323} 4324 | 4262 case MLX5_DEV_EVENT_PORT_UP: 4263 case MLX5_DEV_EVENT_PORT_DOWN: 4264 case MLX5_DEV_EVENT_PORT_INITIALIZED: 4265 /* In RoCE, port up/down events are handled in 4266 * mlx5_netdev_event(). 4267 */ 4268 if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == 4269 IB_LINK_LAYER_ETHERNET) --- 38 unchanged lines hidden (view full) --- 4308 ib_dispatch_event(&ibev); 4309 4310 if (fatal) 4311 ibdev->ib_active = false; 4312out: 4313 kfree(work); 4314} 4315 |
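The right-hand version turns the `dev`/`context` pointer pair in `struct mlx5_ib_event_work` into an anonymous union tagged by `is_slave`, and `mlx5_ib_handle_event()` dispatches on that tag instead of calling `mlx5_core_is_mp_slave()`. Below is a minimal userspace sketch of the tagged-union idiom, with illustrative names rather than the driver's real types:

```c
#include <stdbool.h>
#include <stdio.h>

struct master_dev { const char *name; };
struct slave_info { const char *name; };

/* One deferred-work record serves two producers; the tag says
 * which union member is live, mirroring mlx5_ib_event_work. */
struct event_work {
	bool is_slave;
	union {
		struct master_dev *dev; /* valid when !is_slave */
		struct slave_info *mpi; /* valid when  is_slave */
	};
	unsigned long event;
};

static void handle_event(const struct event_work *work)
{
	/* Only read the union member selected by the tag. */
	if (work->is_slave)
		printf("event %lu for slave %s\n", work->event, work->mpi->name);
	else
		printf("event %lu for master %s\n", work->event, work->dev->name);
}

int main(void)
{
	struct master_dev m = { "mlx5_0" };
	struct slave_info s = { "mlx5_1" };
	struct event_work w1 = { .is_slave = false, .dev = &m, .event = 1 };
	struct event_work w2 = { .is_slave = true,  .mpi = &s, .event = 2 };

	handle_event(&w1);
	handle_event(&w2);
	return 0;
}
```

The union makes the mutually exclusive ownership explicit: a work item is queued either by the master device's notifier or by a slave port's, never both.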
4325static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, 4326 enum mlx5_dev_event event, unsigned long param) | 4316static int mlx5_ib_event(struct notifier_block *nb, 4317 unsigned long event, void *param) |
4327{ 4328 struct mlx5_ib_event_work *work; 4329 4330 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4331 if (!work) | 4318{ 4319 struct mlx5_ib_event_work *work; 4320 4321 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4322 if (!work) |
4332 return; | 4323 return NOTIFY_DONE; |
4333 4334 INIT_WORK(&work->work, mlx5_ib_handle_event); | 4324 4325 INIT_WORK(&work->work, mlx5_ib_handle_event); |
4335 work->dev = dev; | 4326 work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events); 4327 work->is_slave = false; |
4336 work->param = param; | 4328 work->param = param; |
4337 work->context = context; | |
4338 work->event = event; 4339 4340 queue_work(mlx5_ib_event_wq, &work->work); | 4329 work->event = event; 4330 4331 queue_work(mlx5_ib_event_wq, &work->work); 4332 4333 return NOTIFY_OK; |
4341} 4342 | 4334} 4335 |
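In the right-hand version `mlx5_ib_event()` is no longer a `void` callback installed through `struct mlx5_interface`; it is a `notifier_block` handler that returns `NOTIFY_DONE` when it cannot queue work and `NOTIFY_OK` otherwise. Here is a toy, single-threaded sketch of how a notifier chain invokes such handlers (not the kernel's locking-aware implementation):

```c
#include <stdio.h>

#define NOTIFY_DONE 0x0000 /* handler had nothing to do */
#define NOTIFY_OK   0x0001 /* event consumed successfully */

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long event, void *param);
	struct notifier_block *next;
};

/* Walk the chain, letting every subscriber see the event. */
static int call_chain(struct notifier_block *head,
		      unsigned long event, void *param)
{
	int ret = NOTIFY_DONE;

	for (; head; head = head->next)
		ret = head->notifier_call(head, event, param);
	return ret;
}

static int my_handler(struct notifier_block *nb,
		      unsigned long event, void *param)
{
	(void)nb;
	(void)param;
	printf("got event %lu\n", event);
	return NOTIFY_OK;
}

int main(void)
{
	struct notifier_block nb = { .notifier_call = my_handler, .next = NULL };

	call_chain(&nb, 42, NULL);
	return 0;
}
```

Note that the real handler allocates with `GFP_ATOMIC` and defers all work to `mlx5_ib_event_wq`, because notifier callbacks may run in contexts where sleeping is forbidden.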
4336static int mlx5_ib_event_slave_port(struct notifier_block *nb, 4337 unsigned long event, void *param) 4338{ 4339 struct mlx5_ib_event_work *work; 4340 4341 work = kmalloc(sizeof(*work), GFP_ATOMIC); 4342 if (!work) 4343 return NOTIFY_DONE; 4344 4345 INIT_WORK(&work->work, mlx5_ib_handle_event); 4346 work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events); 4347 work->is_slave = true; 4348 work->param = param; 4349 work->event = event; 4350 queue_work(mlx5_ib_event_wq, &work->work); 4351 4352 return NOTIFY_OK; 4353} 4354 |
|
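Both notifier handlers recover their enclosing object with `container_of()`: the chain hands them only a pointer to the embedded `notifier_block`, and the macro subtracts the member's offset to get back to the containing structure. A self-contained illustration using the standard `offsetof`-based definition (the struct names here are stand-ins):

```c
#include <stddef.h>
#include <stdio.h>

/* The classic container_of: subtract the member's offset from its address. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int dummy; };

struct multiport_info {
	int port_num;
	struct notifier_block mdev_events; /* embedded, as in mlx5_ib_multiport_info */
};

static void callback(struct notifier_block *nb)
{
	struct multiport_info *mpi =
		container_of(nb, struct multiport_info, mdev_events);

	printf("callback for port %d\n", mpi->port_num);
}

int main(void)
{
	struct multiport_info mpi = { .port_num = 3 };

	callback(&mpi.mdev_events); /* only the embedded member is passed */
	return 0;
}
```

This is why `mlx5_ib_event()` and `mlx5_ib_event_slave_port()` no longer need a `context` argument: the identity of the subscriber is encoded in the address of the `notifier_block` itself.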
4343static int set_has_smi_cap(struct mlx5_ib_dev *dev) 4344{ 4345 struct mlx5_hca_vport_context vport_ctx; 4346 int err; 4347 int port; 4348 4349 for (port = 1; port <= dev->num_ports; port++) { 4350 dev->mdev->port_caps[port - 1].has_smi = false; --- 987 unchanged lines hidden (view full) --- 5338 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); 5339 atomic_set(&dev->delay_drop.rqs_cnt, 0); 5340 atomic_set(&dev->delay_drop.events_cnt, 0); 5341 5342 if (delay_drop_debugfs_init(dev)) 5343 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); 5344} 5345 | 4355static int set_has_smi_cap(struct mlx5_ib_dev *dev) 4356{ 4357 struct mlx5_hca_vport_context vport_ctx; 4358 int err; 4359 int port; 4360 4361 for (port = 1; port <= dev->num_ports; port++) { 4362 dev->mdev->port_caps[port - 1].has_smi = false; --- 987 unchanged lines hidden (view full) --- 5350 INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); 5351 atomic_set(&dev->delay_drop.rqs_cnt, 0); 5352 atomic_set(&dev->delay_drop.events_cnt, 0); 5353 5354 if (delay_drop_debugfs_init(dev)) 5355 mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n"); 5356} 5357 |
5358static const struct cpumask * 5359mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector) 5360{ 5361 struct mlx5_ib_dev *dev = to_mdev(ibdev); 5362 5363 return mlx5_comp_irq_get_affinity_mask(dev->mdev, comp_vector); 5364} 5365 |
|
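`mlx5_ib_get_vector_affinity()`, present only in the right-hand version, reports which CPUs service a given completion vector's interrupt so consumers can place queues accordingly. Below is a hedged userspace sketch of the inverse lookup with POSIX `cpu_set_t`; the mask layout is invented for illustration, and a real driver would derive the masks from its interrupt setup (e.g. `mlx5_comp_irq_get_affinity_mask()`):

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NUM_COMP_VECTORS 4

/* Pretend per-vector IRQ affinity masks. */
static cpu_set_t vector_mask[NUM_COMP_VECTORS];

/* Pick the first completion vector whose mask contains @cpu. */
static int vector_for_cpu(int cpu)
{
	for (int v = 0; v < NUM_COMP_VECTORS; v++)
		if (CPU_ISSET(cpu, &vector_mask[v]))
			return v;
	return 0; /* fall back to vector 0 */
}

int main(void)
{
	/* Vector v handles CPUs v and v + NUM_COMP_VECTORS. */
	for (int v = 0; v < NUM_COMP_VECTORS; v++) {
		CPU_ZERO(&vector_mask[v]);
		CPU_SET(v, &vector_mask[v]);
		CPU_SET(v + NUM_COMP_VECTORS, &vector_mask[v]);
	}
	printf("CPU 5 -> comp vector %d\n", vector_for_cpu(5)); /* prints 1 */
	return 0;
}
```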
5346/* The mlx5_ib_multiport_mutex should be held when calling this function */ 5347static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, 5348 struct mlx5_ib_multiport_info *mpi) 5349{ 5350 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5351 struct mlx5_ib_port *port = &ibdev->port[port_num]; 5352 int comps; 5353 int err; 5354 int i; 5355 5356 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); 5357 5358 spin_lock(&port->mp.mpi_lock); 5359 if (!mpi->ibdev) { 5360 spin_unlock(&port->mp.mpi_lock); 5361 return; 5362 } | 5366/* The mlx5_ib_multiport_mutex should be held when calling this function */ 5367static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, 5368 struct mlx5_ib_multiport_info *mpi) 5369{ 5370 u8 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; 5371 struct mlx5_ib_port *port = &ibdev->port[port_num]; 5372 int comps; 5373 int err; 5374 int i; 5375 5376 mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); 5377 5378 spin_lock(&port->mp.mpi_lock); 5379 if (!mpi->ibdev) { 5380 spin_unlock(&port->mp.mpi_lock); 5381 return; 5382 } |
5383 5384 if (mpi->mdev_events.notifier_call) 5385 mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); 5386 mpi->mdev_events.notifier_call = NULL; 5387 |
|
5363 mpi->ibdev = NULL; 5364 5365 spin_unlock(&port->mp.mpi_lock); 5366 mlx5_remove_netdev_notifier(ibdev, port_num); 5367 spin_lock(&port->mp.mpi_lock); 5368 5369 comps = mpi->mdev_refcnt; 5370 if (comps) { --- 39 unchanged lines hidden (view full) --- 5410 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", 5411 port_num + 1); 5412 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5413 return false; 5414 } 5415 5416 ibdev->port[port_num].mp.mpi = mpi; 5417 mpi->ibdev = ibdev; | 5388 mpi->ibdev = NULL; 5389 5390 spin_unlock(&port->mp.mpi_lock); 5391 mlx5_remove_netdev_notifier(ibdev, port_num); 5392 spin_lock(&port->mp.mpi_lock); 5393 5394 comps = mpi->mdev_refcnt; 5395 if (comps) { --- 39 unchanged lines hidden (view full) --- 5435 mlx5_ib_dbg(ibdev, "port %d already affiliated.\n", 5436 port_num + 1); 5437 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5438 return false; 5439 } 5440 5441 ibdev->port[port_num].mp.mpi = mpi; 5442 mpi->ibdev = ibdev; |
5443 mpi->mdev_events.notifier_call = NULL; |
|
5418 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5419 5420 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev); 5421 if (err) 5422 goto unbind; 5423 5424 err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev)); 5425 if (err) 5426 goto unbind; 5427 5428 err = mlx5_add_netdev_notifier(ibdev, port_num); 5429 if (err) { 5430 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n", 5431 port_num + 1); 5432 goto unbind; 5433 } 5434 | 5444 spin_unlock(&ibdev->port[port_num].mp.mpi_lock); 5445 5446 err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev); 5447 if (err) 5448 goto unbind; 5449 5450 err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev)); 5451 if (err) 5452 goto unbind; 5453 5454 err = mlx5_add_netdev_notifier(ibdev, port_num); 5455 if (err) { 5456 mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n", 5457 port_num + 1); 5458 goto unbind; 5459 } 5460 |
5461 mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port; 5462 mlx5_notifier_register(mpi->mdev, &mpi->mdev_events); 5463 |
|
5435 err = mlx5_ib_init_cong_debugfs(ibdev, port_num); 5436 if (err) 5437 goto unbind; 5438 5439 return true; 5440 5441unbind: 5442 mlx5_ib_unbind_slave_port(ibdev, mpi); --- 113 unchanged lines hidden (view full) --- 5556 5557ADD_UVERBS_ATTRIBUTES_SIMPLE( 5558 mlx5_ib_flow_action, 5559 UVERBS_OBJECT_FLOW_ACTION, 5560 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, 5561 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 5562 enum mlx5_ib_uapi_flow_action_flags)); 5563 | 5464 err = mlx5_ib_init_cong_debugfs(ibdev, port_num); 5465 if (err) 5466 goto unbind; 5467 5468 return true; 5469 5470unbind: 5471 mlx5_ib_unbind_slave_port(ibdev, mpi); --- 113 unchanged lines hidden (view full) --- 5585 5586ADD_UVERBS_ATTRIBUTES_SIMPLE( 5587 mlx5_ib_flow_action, 5588 UVERBS_OBJECT_FLOW_ACTION, 5589 UVERBS_METHOD_FLOW_ACTION_ESP_CREATE, 5590 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS, 5591 enum mlx5_ib_uapi_flow_action_flags)); 5592 |
5564static const struct uapi_definition mlx5_ib_defs[] = { 5565#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) 5566 UAPI_DEF_CHAIN(mlx5_ib_devx_defs), 5567 UAPI_DEF_CHAIN(mlx5_ib_flow_defs), 5568#endif | 5593static int populate_specs_root(struct mlx5_ib_dev *dev) 5594{ 5595 const struct uverbs_object_tree_def **trees = dev->driver_trees; 5596 size_t num_trees = 0; |
5569 | 5597 |
5570 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, 5571 &mlx5_ib_flow_action), 5572 UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm), 5573 {} 5574}; | 5598 if (mlx5_accel_ipsec_device_caps(dev->mdev) & 5599 MLX5_ACCEL_IPSEC_CAP_DEVICE) 5600 trees[num_trees++] = &mlx5_ib_flow_action; |
5575 | 5601 |
5602 if (MLX5_CAP_DEV_MEM(dev->mdev, memic)) 5603 trees[num_trees++] = &mlx5_ib_dm; 5604 5605 if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & 5606 MLX5_GENERAL_OBJ_TYPES_CAP_UCTX) 5607 trees[num_trees++] = mlx5_ib_get_devx_tree(); 5608 5609 num_trees += mlx5_ib_get_flow_trees(trees + num_trees); 5610 5611 WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees)); 5612 trees[num_trees] = NULL; 5613 dev->ib_dev.driver_specs = trees; 5614 5615 return 0; 5616} 5617 |
|
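`populate_specs_root()` in the right-hand version assembles a NULL-terminated array of uverbs object trees, admitting each tree only when the corresponding device capability is set and bounds-checking with `WARN_ON()`; the left-hand version expresses the same registrations as a static `uapi_definition` table instead. A generic sketch of the capability-gated, NULL-terminated array pattern (the capability bits and tree names below are made up):

```c
#include <assert.h>
#include <stdio.h>

#define CAP_IPSEC 0x1
#define CAP_MEMIC 0x2

struct feature_tree { const char *name; };

static const struct feature_tree ipsec_tree = { "flow_action" };
static const struct feature_tree memic_tree = { "dm" };

#define MAX_TREES 4

/* Register only the trees the device's capability bits allow;
 * terminate the array with NULL so consumers can walk it. */
static size_t populate(unsigned caps, const struct feature_tree **trees)
{
	size_t n = 0;

	if (caps & CAP_IPSEC)
		trees[n++] = &ipsec_tree;
	if (caps & CAP_MEMIC)
		trees[n++] = &memic_tree;

	assert(n < MAX_TREES); /* mirrors the WARN_ON bounds check */
	trees[n] = NULL;
	return n;
}

int main(void)
{
	const struct feature_tree *trees[MAX_TREES];

	populate(CAP_IPSEC, trees);
	for (const struct feature_tree **t = trees; *t; t++)
		printf("registered: %s\n", (*t)->name);
	return 0;
}
```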
5576static int mlx5_ib_read_counters(struct ib_counters *counters, 5577 struct ib_counters_read_attr *read_attr, 5578 struct uverbs_attr_bundle *attrs) 5579{ 5580 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 5581 struct mlx5_read_counters_attr mread_attr = {}; 5582 struct mlx5_ib_flow_counters_desc *desc; 5583 int ret, i; --- 245 unchanged lines hidden (view full) --- 5829 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; 5830 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; 5831 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; 5832 dev->ib_dev.process_mad = mlx5_ib_process_mad; 5833 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; 5834 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; 5835 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 5836 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; | 5618static int mlx5_ib_read_counters(struct ib_counters *counters, 5619 struct ib_counters_read_attr *read_attr, 5620 struct uverbs_attr_bundle *attrs) 5621{ 5622 struct mlx5_ib_mcounters *mcounters = to_mcounters(counters); 5623 struct mlx5_read_counters_attr mread_attr = {}; 5624 struct mlx5_ib_flow_counters_desc *desc; 5625 int ret, i; --- 245 unchanged lines hidden (view full) --- 5871 dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; 5872 dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; 5873 dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; 5874 dev->ib_dev.process_mad = mlx5_ib_process_mad; 5875 dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr; 5876 dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg; 5877 dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; 5878 dev->ib_dev.get_dev_fw_str = get_dev_fw_str; |
5879 dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity; |
|
5837 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && 5838 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) 5839 dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params; 5840 5841 if (mlx5_core_is_pf(mdev)) { 5842 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 5843 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 5844 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; --- 26 unchanged lines hidden (view full) --- 5871 dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr; 5872 } 5873 5874 dev->ib_dev.create_flow = mlx5_ib_create_flow; 5875 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; 5876 dev->ib_dev.uverbs_ex_cmd_mask |= 5877 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 5878 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); | 5880 if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && 5881 IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) 5882 dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params; 5883 5884 if (mlx5_core_is_pf(mdev)) { 5885 dev->ib_dev.get_vf_config = mlx5_ib_get_vf_config; 5886 dev->ib_dev.set_vf_link_state = mlx5_ib_set_vf_link_state; 5887 dev->ib_dev.get_vf_stats = mlx5_ib_get_vf_stats; --- 26 unchanged lines hidden (view full) --- 5914 dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr; 5915 } 5916 5917 dev->ib_dev.create_flow = mlx5_ib_create_flow; 5918 dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow; 5919 dev->ib_dev.uverbs_ex_cmd_mask |= 5920 (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) | 5921 (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW); |
5879 if (mlx5_accel_ipsec_device_caps(dev->mdev) & 5880 MLX5_ACCEL_IPSEC_CAP_DEVICE) { 5881 dev->ib_dev.create_flow_action_esp = 5882 mlx5_ib_create_flow_action_esp; 5883 dev->ib_dev.modify_flow_action_esp = 5884 mlx5_ib_modify_flow_action_esp; 5885 } | 5922 dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp; |
5886 dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action; | 5923 dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action; |
5924 dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp; |
|
5887 dev->ib_dev.driver_id = RDMA_DRIVER_MLX5; 5888 dev->ib_dev.create_counters = mlx5_ib_create_counters; 5889 dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters; 5890 dev->ib_dev.read_counters = mlx5_ib_read_counters; 5891 | 5925 dev->ib_dev.driver_id = RDMA_DRIVER_MLX5; 5926 dev->ib_dev.create_counters = mlx5_ib_create_counters; 5927 dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters; 5928 dev->ib_dev.read_counters = mlx5_ib_read_counters; 5929 |
5892 if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) 5893 dev->ib_dev.driver_def = mlx5_ib_defs; 5894 | |
5895 err = init_node_data(dev); 5896 if (err) 5897 return err; 5898 5899 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 5900 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) || 5901 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 5902 mutex_init(&dev->lb.mutex); --- 196 unchanged lines hidden (view full) --- 6099} 6100 6101void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) 6102{ 6103 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); 6104 mlx5_free_bfreg(dev->mdev, &dev->bfreg); 6105} 6106 | 5930 err = init_node_data(dev); 5931 if (err) 5932 return err; 5933 5934 if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && 5935 (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) || 5936 MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) 5937 mutex_init(&dev->lb.mutex); --- 196 unchanged lines hidden (view full) --- 6134} 6135 6136void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) 6137{ 6138 mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); 6139 mlx5_free_bfreg(dev->mdev, &dev->bfreg); 6140} 6141 |
6142static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev) 6143{ 6144 return populate_specs_root(dev); 6145} 6146 |
|
6107int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) 6108{ 6109 const char *name; 6110 6111 rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group); 6112 if (!mlx5_lag_is_active(dev->mdev)) 6113 name = "mlx5_%d"; 6114 else --- 35 unchanged lines hidden (view full) --- 6150 return 0; 6151} 6152 6153static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev) 6154{ 6155 mlx5_ib_unregister_vport_reps(dev); 6156} 6157 | 6147int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) 6148{ 6149 const char *name; 6150 6151 rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group); 6152 if (!mlx5_lag_is_active(dev->mdev)) 6153 name = "mlx5_%d"; 6154 else --- 35 unchanged lines hidden (view full) --- 6190 return 0; 6191} 6192 6193static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev) 6194{ 6195 mlx5_ib_unregister_vport_reps(dev); 6196} 6197 |
6198static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) 6199{ 6200 dev->mdev_events.notifier_call = mlx5_ib_event; 6201 mlx5_notifier_register(dev->mdev, &dev->mdev_events); 6202 return 0; 6203} 6204 6205static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) 6206{ 6207 mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); 6208} 6209 |
|
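The `MLX5_IB_STAGE_DEV_NOTIFIER` stage pairs `mlx5_notifier_register()` with `mlx5_notifier_unregister()`, and the slave unbind path above only unregisters when `mpi->mdev_events.notifier_call` is non-NULL before clearing it, so teardown is safe even if binding never reached registration. A small sketch of that idempotent-teardown idiom (all names below are hypothetical):

```c
#include <stdio.h>

struct notifier_block {
	int (*notifier_call)(unsigned long event);
};

static int slave_event(unsigned long event)
{
	(void)event;
	return 0;
}

static void notifier_register(struct notifier_block *nb)
{
	(void)nb;
	printf("registered\n");
}

static void notifier_unregister(struct notifier_block *nb)
{
	(void)nb;
	printf("unregistered\n");
}

struct mpi {
	struct notifier_block mdev_events;
};

/* bind_port() defers setting notifier_call until registration actually
 * happens, so a failure before that point leaves the guard pointer NULL. */
static void bind_port(struct mpi *m)
{
	m->mdev_events.notifier_call = NULL;
	/* ... setup steps that may fail and fall through to unbind ... */
	m->mdev_events.notifier_call = slave_event;
	notifier_register(&m->mdev_events);
}

static void unbind_port(struct mpi *m)
{
	/* Safe whether or not bind_port() got far enough to register. */
	if (m->mdev_events.notifier_call)
		notifier_unregister(&m->mdev_events);
	m->mdev_events.notifier_call = NULL;
}

int main(void)
{
	struct mpi m;

	bind_port(&m);
	unbind_port(&m);
	unbind_port(&m); /* second unbind is a harmless no-op */
	return 0;
}
```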
6158void __mlx5_ib_remove(struct mlx5_ib_dev *dev, 6159 const struct mlx5_ib_profile *profile, 6160 int stage) 6161{ 6162 /* Number of stages to cleanup */ 6163 while (stage) { 6164 stage--; 6165 if (profile->stage[stage].cleanup) --- 49 unchanged lines hidden (view full) --- 6215 mlx5_ib_stage_non_default_cb, 6216 NULL), 6217 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6218 mlx5_ib_stage_roce_init, 6219 mlx5_ib_stage_roce_cleanup), 6220 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6221 mlx5_ib_stage_dev_res_init, 6222 mlx5_ib_stage_dev_res_cleanup), | 6210void __mlx5_ib_remove(struct mlx5_ib_dev *dev, 6211 const struct mlx5_ib_profile *profile, 6212 int stage) 6213{ 6214 /* Number of stages to cleanup */ 6215 while (stage) { 6216 stage--; 6217 if (profile->stage[stage].cleanup) --- 49 unchanged lines hidden (view full) --- 6267 mlx5_ib_stage_non_default_cb, 6268 NULL), 6269 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6270 mlx5_ib_stage_roce_init, 6271 mlx5_ib_stage_roce_cleanup), 6272 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6273 mlx5_ib_stage_dev_res_init, 6274 mlx5_ib_stage_dev_res_cleanup), |
6275 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, 6276 mlx5_ib_stage_dev_notifier_init, 6277 mlx5_ib_stage_dev_notifier_cleanup), |
|
6223 STAGE_CREATE(MLX5_IB_STAGE_ODP, 6224 mlx5_ib_stage_odp_init, 6225 mlx5_ib_stage_odp_cleanup), 6226 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6227 mlx5_ib_stage_counters_init, 6228 mlx5_ib_stage_counters_cleanup), 6229 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, 6230 mlx5_ib_stage_cong_debugfs_init, 6231 mlx5_ib_stage_cong_debugfs_cleanup), 6232 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6233 mlx5_ib_stage_uar_init, 6234 mlx5_ib_stage_uar_cleanup), 6235 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6236 mlx5_ib_stage_bfrag_init, 6237 mlx5_ib_stage_bfrag_cleanup), 6238 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6239 NULL, 6240 mlx5_ib_stage_pre_ib_reg_umr_cleanup), | 6278 STAGE_CREATE(MLX5_IB_STAGE_ODP, 6279 mlx5_ib_stage_odp_init, 6280 mlx5_ib_stage_odp_cleanup), 6281 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6282 mlx5_ib_stage_counters_init, 6283 mlx5_ib_stage_counters_cleanup), 6284 STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, 6285 mlx5_ib_stage_cong_debugfs_init, 6286 mlx5_ib_stage_cong_debugfs_cleanup), 6287 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6288 mlx5_ib_stage_uar_init, 6289 mlx5_ib_stage_uar_cleanup), 6290 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6291 mlx5_ib_stage_bfrag_init, 6292 mlx5_ib_stage_bfrag_cleanup), 6293 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6294 NULL, 6295 mlx5_ib_stage_pre_ib_reg_umr_cleanup), |
6296 STAGE_CREATE(MLX5_IB_STAGE_SPECS, 6297 mlx5_ib_stage_populate_specs, 6298 NULL), |
|
6241 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6242 mlx5_ib_stage_ib_reg_init, 6243 mlx5_ib_stage_ib_reg_cleanup), 6244 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6245 mlx5_ib_stage_post_ib_reg_umr_init, 6246 NULL), 6247 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, 6248 mlx5_ib_stage_delay_drop_init, --- 14 unchanged lines hidden (view full) --- 6263 mlx5_ib_stage_rep_non_default_cb, 6264 NULL), 6265 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6266 mlx5_ib_stage_rep_roce_init, 6267 mlx5_ib_stage_rep_roce_cleanup), 6268 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6269 mlx5_ib_stage_dev_res_init, 6270 mlx5_ib_stage_dev_res_cleanup), | 6299 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6300 mlx5_ib_stage_ib_reg_init, 6301 mlx5_ib_stage_ib_reg_cleanup), 6302 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6303 mlx5_ib_stage_post_ib_reg_umr_init, 6304 NULL), 6305 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, 6306 mlx5_ib_stage_delay_drop_init, --- 14 unchanged lines hidden (view full) --- 6321 mlx5_ib_stage_rep_non_default_cb, 6322 NULL), 6323 STAGE_CREATE(MLX5_IB_STAGE_ROCE, 6324 mlx5_ib_stage_rep_roce_init, 6325 mlx5_ib_stage_rep_roce_cleanup), 6326 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, 6327 mlx5_ib_stage_dev_res_init, 6328 mlx5_ib_stage_dev_res_cleanup), |
6329 STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, 6330 mlx5_ib_stage_dev_notifier_init, 6331 mlx5_ib_stage_dev_notifier_cleanup), |
|
6271 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6272 mlx5_ib_stage_counters_init, 6273 mlx5_ib_stage_counters_cleanup), 6274 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6275 mlx5_ib_stage_uar_init, 6276 mlx5_ib_stage_uar_cleanup), 6277 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6278 mlx5_ib_stage_bfrag_init, 6279 mlx5_ib_stage_bfrag_cleanup), 6280 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6281 NULL, 6282 mlx5_ib_stage_pre_ib_reg_umr_cleanup), | 6332 STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, 6333 mlx5_ib_stage_counters_init, 6334 mlx5_ib_stage_counters_cleanup), 6335 STAGE_CREATE(MLX5_IB_STAGE_UAR, 6336 mlx5_ib_stage_uar_init, 6337 mlx5_ib_stage_uar_cleanup), 6338 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 6339 mlx5_ib_stage_bfrag_init, 6340 mlx5_ib_stage_bfrag_cleanup), 6341 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, 6342 NULL, 6343 mlx5_ib_stage_pre_ib_reg_umr_cleanup), |
6344 STAGE_CREATE(MLX5_IB_STAGE_SPECS, 6345 mlx5_ib_stage_populate_specs, 6346 NULL), |
|
6283 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6284 mlx5_ib_stage_ib_reg_init, 6285 mlx5_ib_stage_ib_reg_cleanup), 6286 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6287 mlx5_ib_stage_post_ib_reg_umr_init, 6288 NULL), 6289 STAGE_CREATE(MLX5_IB_STAGE_REP_REG, 6290 mlx5_ib_stage_rep_reg_init, --- 89 unchanged lines hidden (view full) --- 6380 6381 dev = context; 6382 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); 6383} 6384 6385static struct mlx5_interface mlx5_ib_interface = { 6386 .add = mlx5_ib_add, 6387 .remove = mlx5_ib_remove, | 6347 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 6348 mlx5_ib_stage_ib_reg_init, 6349 mlx5_ib_stage_ib_reg_cleanup), 6350 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, 6351 mlx5_ib_stage_post_ib_reg_umr_init, 6352 NULL), 6353 STAGE_CREATE(MLX5_IB_STAGE_REP_REG, 6354 mlx5_ib_stage_rep_reg_init, --- 89 unchanged lines hidden (view full) --- 6444 6445 dev = context; 6446 __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); 6447} 6448 6449static struct mlx5_interface mlx5_ib_interface = { 6450 .add = mlx5_ib_add, 6451 .remove = mlx5_ib_remove, |
6388 .event = mlx5_ib_event, | |
6389 .protocol = MLX5_INTERFACE_PROTOCOL_IB, 6390}; 6391 6392unsigned long mlx5_ib_get_xlt_emergency_page(void) 6393{ 6394 mutex_lock(&xlt_emergency_page_mutex); 6395 return xlt_emergency_page; 6396} --- 39 unchanged lines hidden --- | 6452 .protocol = MLX5_INTERFACE_PROTOCOL_IB, 6453}; 6454 6455unsigned long mlx5_ib_get_xlt_emergency_page(void) 6456{ 6457 mutex_lock(&xlt_emergency_page_mutex); 6458 return xlt_emergency_page; 6459} --- 39 unchanged lines hidden --- |
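Both device profiles above are tables of `STAGE_CREATE(init, cleanup)` pairs, and `__mlx5_ib_remove()` unwinds them in reverse from the last completed stage, which is also how a failed init at stage N rolls back stages 0..N-1. A compact sketch of that staged bring-up and reverse unwind (stage names invented):

```c
#include <stdio.h>

struct stage {
	const char *name;
	int  (*init)(void);
	void (*cleanup)(void);
};

static int  init_a(void)  { printf("init a\n"); return 0; }
static void clean_a(void) { printf("clean a\n"); }
static int  init_b(void)  { printf("init b\n"); return -1; } /* simulate failure */
static void clean_b(void) { printf("clean b\n"); }

static const struct stage profile[] = {
	{ "a", init_a, clean_a },
	{ "b", init_b, clean_b },
};

/* Tear down stages [0, stage) in reverse order, like __mlx5_ib_remove(). */
static void remove_stages(int stage)
{
	while (stage--)
		if (profile[stage].cleanup)
			profile[stage].cleanup();
}

int main(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		if (profile[i].init && profile[i].init()) {
			remove_stages(i); /* unwind what already succeeded */
			return 1;
		}
	}
	return 0;
}
```

Keeping init and cleanup side by side in one table is what lets the diff add the notifier and specs stages in both the default and the switchdev-rep profile without touching the probe/remove control flow.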