1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include "selq.h"
5 #include <linux/slab.h>
6 #include <linux/netdevice.h>
7 #include "en.h"
8 #include "en/ptp.h"
9 
/* Snapshot of the TX queue layout, published to mlx5e_select_queue() via RCU
 * (selq->active). A second instance (selq->standby) is filled under
 * state_lock and swapped in by mlx5e_selq_apply().
 */
struct mlx5e_selq_params {
	unsigned int num_regular_queues; /* num_channels * num_tcs */
	unsigned int num_channels;
	unsigned int num_tcs;
	bool is_htb; /* HTB offload (extra classified queues) enabled */
	bool is_ptp; /* dedicated PTP TX queues enabled */
};
17 
18 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
19 {
20 	struct mlx5e_selq_params *init_params;
21 
22 	selq->state_lock = state_lock;
23 
24 	selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
25 	if (!selq->standby)
26 		return -ENOMEM;
27 
28 	init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
29 	if (!init_params) {
30 		kvfree(selq->standby);
31 		selq->standby = NULL;
32 		return -ENOMEM;
33 	}
34 	/* Assign dummy values, so that mlx5e_select_queue won't crash. */
35 	*init_params = (struct mlx5e_selq_params) {
36 		.num_regular_queues = 1,
37 		.num_channels = 1,
38 		.num_tcs = 1,
39 		.is_htb = false,
40 		.is_ptp = false,
41 	};
42 	rcu_assign_pointer(selq->active, init_params);
43 
44 	return 0;
45 }
46 
/* Free both parameter sets on netdev teardown.
 *
 * The statement order below is load-bearing: the standby set is freed first,
 * then mlx5e_selq_apply() is abused to retire the active set — it swaps the
 * (now NULL) standby pointer into selq->active, waits for in-flight
 * mlx5e_select_queue() readers via synchronize_net(), and hands the old
 * active set back through selq->standby, where the second kvfree releases it.
 * After this, selq->active is NULL, so no further TX queue selection may run.
 *
 * NOTE(review): mlx5e_selq_apply() checks lockdep_is_held(state_lock) inside
 * rcu_replace_pointer(), so this presumably expects the caller to hold
 * state_lock (or to be the only remaining user) — confirm against callers.
 */
void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
{
	WARN_ON_ONCE(selq->is_prepared);

	kvfree(selq->standby);
	selq->standby = NULL;
	/* Satisfy the WARN_ON_ONCE in mlx5e_selq_apply(). */
	selq->is_prepared = true;

	mlx5e_selq_apply(selq);

	/* Now holds what used to be the active set. */
	kvfree(selq->standby);
	selq->standby = NULL;
}
60 
61 void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
62 {
63 	lockdep_assert_held(selq->state_lock);
64 	WARN_ON_ONCE(selq->is_prepared);
65 
66 	selq->is_prepared = true;
67 
68 	selq->standby->num_channels = params->num_channels;
69 	selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
70 	selq->standby->num_regular_queues =
71 		selq->standby->num_channels * selq->standby->num_tcs;
72 	selq->standby->is_htb = htb;
73 	selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
74 }
75 
/* Atomically publish the prepared standby set as the active one.
 *
 * The displaced active set becomes the new standby buffer, but only after
 * synchronize_net() guarantees no mlx5e_select_queue() reader still holds a
 * reference to it — so the next prepare() may safely overwrite it.
 * Called under state_lock (checked by rcu_replace_pointer's lockdep cond).
 */
void mlx5e_selq_apply(struct mlx5e_selq *selq)
{
	struct mlx5e_selq_params *old_params;

	WARN_ON_ONCE(!selq->is_prepared);

	selq->is_prepared = false;

	old_params = rcu_replace_pointer(selq->active, selq->standby,
					 lockdep_is_held(selq->state_lock));
	synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
	selq->standby = old_params;
}
89 
/* Abandon a prepared-but-not-applied standby set (e.g. when switching to the
 * new channels failed). The standby contents are simply left to be
 * overwritten by the next mlx5e_selq_prepare(). Called under state_lock.
 */
void mlx5e_selq_cancel(struct mlx5e_selq *selq)
{
	lockdep_assert_held(selq->state_lock);
	WARN_ON_ONCE(!selq->is_prepared);

	selq->is_prepared = false;
}
97 
98 #ifdef CONFIG_MLX5_CORE_EN_DCB
99 static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
100 {
101 	int dscp_cp = 0;
102 
103 	if (skb->protocol == htons(ETH_P_IP))
104 		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
105 	else if (skb->protocol == htons(ETH_P_IPV6))
106 		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
107 
108 	return priv->dcbx_dp.dscp2prio[dscp_cp];
109 }
110 #endif
111 
112 static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb)
113 {
114 	struct mlx5e_priv *priv = netdev_priv(dev);
115 	int up = 0;
116 
117 	if (!netdev_get_num_tc(dev))
118 		goto return_txq;
119 
120 #ifdef CONFIG_MLX5_CORE_EN_DCB
121 	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
122 		up = mlx5e_get_dscp_up(priv, skb);
123 	else
124 #endif
125 		if (skb_vlan_tag_present(skb))
126 			up = skb_vlan_tag_get_prio(skb);
127 
128 return_txq:
129 	return priv->port_ptp_tc2realtxq[up];
130 }
131 
132 static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb,
133 				  u16 htb_maj_id)
134 {
135 	u16 classid;
136 
137 	if ((TC_H_MAJ(skb->priority) >> 16) == htb_maj_id)
138 		classid = TC_H_MIN(skb->priority);
139 	else
140 		classid = READ_ONCE(priv->htb.defcls);
141 
142 	if (!classid)
143 		return 0;
144 
145 	return mlx5e_get_txq_by_classid(priv, classid);
146 }
147 
/* ndo_select_queue: choose the TX queue for @skb.
 *
 * Regular queues occupy indices [0, num_tc_x_num_ch); HTB and PTP queues, if
 * any, live above that range, in which case they are considered first. The
 * final index combines the channel picked by netdev_pick_tx() with the
 * packet's priority (DSCP or VLAN PCP) via channel_tc2realtxq.
 */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int num_tc_x_num_ch;
	int txq_ix;
	int up = 0;
	int ch_ix;

	/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
	num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
	if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
		/* Special queues (HTB and/or PTP) exist above the regular range. */
		struct mlx5e_ptp *ptp_channel;

		/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
		u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);

		if (unlikely(htb_maj_id)) {
			txq_ix = mlx5e_select_htb_queue(priv, skb, htb_maj_id);
			/* <= 0 means no class matched or errno: fall through. */
			if (txq_ix > 0)
				return txq_ix;
		}

		ptp_channel = READ_ONCE(priv->channels.ptp);
		if (unlikely(ptp_channel &&
			     test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
			     mlx5e_use_ptpsq(skb)))
			return mlx5e_select_ptpsq(dev, skb);

		txq_ix = netdev_pick_tx(dev, skb, NULL);
		/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
		 * If they are selected, switch to regular queues.
		 * Driver to select these queues only at mlx5e_select_ptpsq()
		 * and mlx5e_select_htb_queue().
		 */
		if (unlikely(txq_ix >= num_tc_x_num_ch))
			txq_ix %= num_tc_x_num_ch;
	} else {
		txq_ix = netdev_pick_tx(dev, skb, NULL);
	}

	/* No traffic classes configured: the picked index is final. */
	if (!netdev_get_num_tc(dev))
		return txq_ix;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels),
	 * So we can return a txq_ix that matches the channel and
	 * packet UP.
	 */
	ch_ix = priv->txq2sq[txq_ix]->ch_ix;

	return priv->channel_tc2realtxq[ch_ix][up];
}
208