1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3 
4 #include "selq.h"
5 #include <linux/slab.h>
6 #include <linux/netdevice.h>
7 #include <linux/rcupdate.h>
8 #include "en.h"
9 #include "en/ptp.h"
10 
/* Snapshot of the TX queue-selection parameters, published to the datapath
 * (mlx5e_select_queue) via RCU so that readers always see a consistent set.
 */
struct mlx5e_selq_params {
	unsigned int num_regular_queues; /* num_channels * num_tcs */
	unsigned int num_channels;
	unsigned int num_tcs;
	bool is_htb; /* true when queue selection must consider HTB txqs */
	bool is_ptp; /* true when the TX_PORT_TS pflag is enabled (PTP SQs exist) */
};
18 
19 int mlx5e_selq_init(struct mlx5e_selq *selq, struct mutex *state_lock)
20 {
21 	struct mlx5e_selq_params *init_params;
22 
23 	selq->state_lock = state_lock;
24 
25 	selq->standby = kvzalloc(sizeof(*selq->standby), GFP_KERNEL);
26 	if (!selq->standby)
27 		return -ENOMEM;
28 
29 	init_params = kvzalloc(sizeof(*selq->active), GFP_KERNEL);
30 	if (!init_params) {
31 		kvfree(selq->standby);
32 		selq->standby = NULL;
33 		return -ENOMEM;
34 	}
35 	/* Assign dummy values, so that mlx5e_select_queue won't crash. */
36 	*init_params = (struct mlx5e_selq_params) {
37 		.num_regular_queues = 1,
38 		.num_channels = 1,
39 		.num_tcs = 1,
40 		.is_htb = false,
41 		.is_ptp = false,
42 	};
43 	rcu_assign_pointer(selq->active, init_params);
44 
45 	return 0;
46 }
47 
48 void mlx5e_selq_cleanup(struct mlx5e_selq *selq)
49 {
50 	WARN_ON_ONCE(selq->is_prepared);
51 
52 	kvfree(selq->standby);
53 	selq->standby = NULL;
54 	selq->is_prepared = true;
55 
56 	mlx5e_selq_apply(selq);
57 
58 	kvfree(selq->standby);
59 	selq->standby = NULL;
60 }
61 
62 void mlx5e_selq_prepare(struct mlx5e_selq *selq, struct mlx5e_params *params, bool htb)
63 {
64 	lockdep_assert_held(selq->state_lock);
65 	WARN_ON_ONCE(selq->is_prepared);
66 
67 	selq->is_prepared = true;
68 
69 	selq->standby->num_channels = params->num_channels;
70 	selq->standby->num_tcs = mlx5e_get_dcb_num_tc(params);
71 	selq->standby->num_regular_queues =
72 		selq->standby->num_channels * selq->standby->num_tcs;
73 	selq->standby->is_htb = htb;
74 	selq->standby->is_ptp = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_TX_PORT_TS);
75 }
76 
77 void mlx5e_selq_apply(struct mlx5e_selq *selq)
78 {
79 	struct mlx5e_selq_params *old_params;
80 
81 	WARN_ON_ONCE(!selq->is_prepared);
82 
83 	selq->is_prepared = false;
84 
85 	old_params = rcu_replace_pointer(selq->active, selq->standby,
86 					 lockdep_is_held(selq->state_lock));
87 	synchronize_net(); /* Wait until ndo_select_queue starts emitting correct values. */
88 	selq->standby = old_params;
89 }
90 
91 void mlx5e_selq_cancel(struct mlx5e_selq *selq)
92 {
93 	lockdep_assert_held(selq->state_lock);
94 	WARN_ON_ONCE(!selq->is_prepared);
95 
96 	selq->is_prepared = false;
97 }
98 
99 #ifdef CONFIG_MLX5_CORE_EN_DCB
100 static int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
101 {
102 	int dscp_cp = 0;
103 
104 	if (skb->protocol == htons(ETH_P_IP))
105 		dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
106 	else if (skb->protocol == htons(ETH_P_IPV6))
107 		dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
108 
109 	return priv->dcbx_dp.dscp2prio[dscp_cp];
110 }
111 #endif
112 
113 static u16 mlx5e_select_ptpsq(struct net_device *dev, struct sk_buff *skb,
114 			      struct mlx5e_selq_params *selq)
115 {
116 	struct mlx5e_priv *priv = netdev_priv(dev);
117 	int up = 0;
118 
119 	if (selq->num_tcs <= 1)
120 		goto return_txq;
121 
122 #ifdef CONFIG_MLX5_CORE_EN_DCB
123 	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
124 		up = mlx5e_get_dscp_up(priv, skb);
125 	else
126 #endif
127 		if (skb_vlan_tag_present(skb))
128 			up = skb_vlan_tag_get_prio(skb);
129 
130 return_txq:
131 	return selq->num_regular_queues + up;
132 }
133 
134 static int mlx5e_select_htb_queue(struct mlx5e_priv *priv, struct sk_buff *skb)
135 {
136 	u16 classid;
137 
138 	/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
139 	if ((TC_H_MAJ(skb->priority) >> 16) == smp_load_acquire(&priv->htb.maj_id))
140 		classid = TC_H_MIN(skb->priority);
141 	else
142 		classid = READ_ONCE(priv->htb.defcls);
143 
144 	if (!classid)
145 		return 0;
146 
147 	return mlx5e_get_txq_by_classid(priv, classid);
148 }
149 
/* ndo_select_queue implementation. Runs in the TX softirq path under RCU
 * (BH), reading one consistent snapshot of the selq parameters so queue
 * selection stays coherent while the channel configuration changes.
 */
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_selq_params *selq;
	int txq_ix;
	int up = 0;

	selq = rcu_dereference_bh(priv->selq.active);

	/* This is a workaround needed only for the mlx5e_netdev_change_profile
	 * flow that zeroes out the whole priv without unregistering the netdev
	 * and without preventing ndo_select_queue from being called.
	 */
	if (unlikely(!selq))
		return 0;

	if (unlikely(selq->is_ptp || selq->is_htb)) {
		/* HTB takes precedence: a positive return is a dedicated HTB
		 * txq; 0 means no class matched, fall through.
		 */
		if (unlikely(selq->is_htb)) {
			txq_ix = mlx5e_select_htb_queue(priv, skb);
			if (txq_ix > 0)
				return txq_ix;
		}

		if (unlikely(selq->is_ptp && mlx5e_use_ptpsq(skb)))
			return mlx5e_select_ptpsq(dev, skb, selq);

		txq_ix = netdev_pick_tx(dev, skb, NULL);
		/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
		 * If they are selected, switch to regular queues.
		 * Driver to select these queues only at mlx5e_select_ptpsq()
		 * and mlx5e_select_htb_queue().
		 */
		if (unlikely(txq_ix >= selq->num_regular_queues))
			txq_ix %= selq->num_regular_queues;
	} else {
		txq_ix = netdev_pick_tx(dev, skb, NULL);
	}

	/* With a single TC there is no priority dimension to fold in. */
	if (selq->num_tcs <= 1)
		return txq_ix;

	/* Derive the user priority: from DSCP when the port trusts DSCP
	 * (DCB builds only), otherwise from the VLAN tag if present.
	 */
#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
		up = mlx5e_get_dscp_up(priv, skb);
	else
#endif
		if (skb_vlan_tag_present(skb))
			up = skb_vlan_tag_get_prio(skb);

	/* Normalize any picked txq_ix to [0, num_channels),
	 * So we can return a txq_ix that matches the channel and
	 * packet UP.
	 */
	return txq_ix % selq->num_channels + up * selq->num_channels;
}
206