// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 */

#include <linux/mlx5/vport.h>
#include "ib_rep.h"
#include "srq.h"

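/*
 * Init/cleanup stages for IB devices created on behalf of VF vport
 * representors. Uplink representors use uplink_rep_profile instead
 * (see mlx5_ib_vport_rep_load() below).
 */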
static const struct mlx5_ib_profile vf_rep_profile = {
	STAGE_CREATE(MLX5_IB_STAGE_INIT,
		     mlx5_ib_stage_init_init,
		     mlx5_ib_stage_init_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
		     mlx5_ib_stage_rep_flow_db_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_CAPS,
		     mlx5_ib_stage_caps_init,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
		     mlx5_ib_stage_rep_non_default_cb,
		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
		     mlx5_ib_stage_rep_roce_init,
		     mlx5_ib_stage_rep_roce_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
		     mlx5_init_srq_table,
		     mlx5_cleanup_srq_table),
	STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
		     mlx5_ib_stage_dev_res_init,
		     mlx5_ib_stage_dev_res_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
		     mlx5_ib_stage_counters_init,
		     mlx5_ib_stage_counters_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
		     mlx5_ib_stage_bfrag_init,
		     mlx5_ib_stage_bfrag_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
		     NULL,
		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
		     mlx5_ib_stage_ib_reg_init,
		     mlx5_ib_stage_ib_reg_cleanup),
	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
		     mlx5_ib_stage_post_ib_reg_umr_init,
		     NULL),
};

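/*
 * REP_IB load callback: allocate and initialize an IB device for the given
 * eswitch representor, picking the uplink or VF profile based on the vport,
 * and store the resulting mlx5_ib_dev in the representor's REP_IB private
 * slot.
 */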
static int
mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5_ib_profile *profile;
	struct mlx5_ib_dev *ibdev;

	if (rep->vport == MLX5_VPORT_UPLINK)
		profile = &uplink_rep_profile;
	else
		profile = &vf_rep_profile;

	ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
	if (!ibdev)
		return -ENOMEM;

	ibdev->rep = rep;
	ibdev->mdev = dev;
	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
			       MLX5_CAP_GEN(dev, num_vhca_ports));
	if (!__mlx5_ib_add(ibdev, profile)) {
		ib_dealloc_device(&ibdev->ib_dev);
		return -EINVAL;
	}

	rep->rep_if[REP_IB].priv = ibdev;

	return 0;
}

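/*
 * REP_IB unload callback: tear down the IB device previously created for
 * this representor, if any, running the cleanup half of its profile stages
 * before freeing the device.
 */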
static void
mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5_ib_dev *dev;

	if (!rep->rep_if[REP_IB].priv)
		return;

	dev = mlx5_ib_rep_to_dev(rep);
	__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
	rep->rep_if[REP_IB].priv = NULL;
	ib_dealloc_device(&dev->ib_dev);
}

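/* REP_IB get_proto_dev callback: return the IB device bound to a representor. */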
static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	return mlx5_ib_rep_to_dev(rep);
}

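/*
 * Register the REP_IB interface (load/unload/get_proto_dev callbacks) with
 * the eswitch, so it is invoked as vport representors are loaded and
 * unloaded.
 */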
void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep_if rep_if = {};

	rep_if.load = mlx5_ib_vport_rep_load;
	rep_if.unload = mlx5_ib_vport_rep_unload;
	rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;

	mlx5_eswitch_register_vport_reps(esw, &rep_if, REP_IB);
}

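/* Unregister the REP_IB interface from the eswitch. */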
void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_IB);
}

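/*
 * Thin wrappers around eswitch helpers, so the rest of mlx5_ib does not need
 * to reach into eswitch internals directly.
 */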
u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_mode(esw);
}

struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
}

struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
					  int vport_index)
{
	return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
}

struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
{
	return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
}

struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
{
	return mlx5_eswitch_vport_rep(esw, vport);
}

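/*
 * For representor devices, install an eswitch rule that matches packets sent
 * from the given raw packet QP's SQ and forwards them to the representor's
 * vport. No-op for non-representor devices.
 */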
int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
			      struct mlx5_ib_sq *sq)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;

	if (!dev->rep)
		return 0;

	flow_rule =
		mlx5_eswitch_add_send_to_vport_rule(esw,
						    dev->rep->vport,
						    sq->base.mqp.qpn);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	sq->flow_rule = flow_rule;

	return 0;
}