/**
 * Copyright (c) 2014 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "rsi_mgmt.h"
#include "rsi_common.h"

/**
 * rsi_determine_min_weight_queue() - This function determines the queue with
 *				      the min weight.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number.
 */
static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	u32 q_len = 0;
	u8 ii = 0;

	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if ((tx_qinfo[ii].pkt_contended) && q_len) {
			common->min_weight = tx_qinfo[ii].weight;
			break;
		}
	}
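	/*
	 * If no contended queue with pending packets was found, ii ends up
	 * equal to NUM_EDCA_QUEUES; the caller checks for this out-of-range
	 * value before clearing the contention flag.
	 */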
	return ii;
}

/**
 * rsi_recalculate_weights() - This function recalculates the weights
 *			       corresponding to each queue.
 * @common: Pointer to the driver private structure.
 *
 * Return: true if any queue was freshly contended, false otherwise.
 */
static bool rsi_recalculate_weights(struct rsi_common *common)
{
	struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
	bool recontend_queue = false;
	u8 ii = 0;
	u32 q_len = 0;

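	/*
	 * For every queue with pending packets: a queue already in
	 * contention has its residual weight reduced by the current minimum
	 * weight (floored at zero), while a queue entering contention is
	 * armed with its WME weight and flagged via recontend_queue.
	 */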
	for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		/* Check for the need of contention */
		if (q_len) {
			if (tx_qinfo[ii].pkt_contended) {
				tx_qinfo[ii].weight =
				((tx_qinfo[ii].weight > common->min_weight) ?
				 tx_qinfo[ii].weight - common->min_weight : 0);
			} else {
				tx_qinfo[ii].pkt_contended = 1;
				tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
				recontend_queue = true;
			}
		} else { /* No packets so no contention */
			tx_qinfo[ii].weight = 0;
			tx_qinfo[ii].pkt_contended = 0;
		}
	}

	return recontend_queue;
}

/**
 * rsi_get_num_pkts_dequeue() - This function determines the number of
 *				packets to be dequeued based on the
 *				transmission time available in the txop.
 * @common: Pointer to the driver private structure.
 * @q_num: The queue from which packets have to be dequeued.
 *
 * Return: pkt_cnt: Number of packets to be dequeued.
 */
static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	u32 pkt_cnt = 0;
	s16 txop = common->tx_qinfo[q_num].txop * 32;
	__le16 r_txop;
	struct ieee80211_rate rate;

	rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
	if (q_num == VI_Q)
		txop = ((txop << 5) / 80);

	if (skb_queue_len(&common->tx_queue[q_num]))
		skb = skb_peek(&common->tx_queue[q_num]);
	else
		return 0;

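	/*
	 * txop was scaled from the WMM TXOP limit (specified in units of
	 * 32 us) into an airtime budget. Count frames until the cumulative
	 * estimated duration at the reference rate exhausts that budget or
	 * the queue runs out of packets.
	 */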
	do {
		r_txop = ieee80211_generic_frame_duration(adapter->hw,
							  adapter->vifs[0],
							  common->band,
							  skb->len, &rate);
		txop -= le16_to_cpu(r_txop);
		pkt_cnt += 1;
		/* Check whether more packets are still queued */
		if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
			skb = skb->next;
		else
			break;
	} while (txop > 0);

	return pkt_cnt;
}

/**
 * rsi_core_determine_hal_queue() - This function determines the queue from
 *				    which a packet has to be dequeued.
 * @common: Pointer to the driver private structure.
 *
 * Return: q_num: Corresponding queue number on success.
 */
static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
{
	bool recontend_queue = false;
	u32 q_len = 0;
	u8 q_num = INVALID_QUEUE;
	u8 ii = 0;

	if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
		if (!common->mgmt_q_block)
			q_num = MGMT_SOFT_Q;
		return q_num;
	}

	if (common->hw_data_qs_blocked)
		return q_num;

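	/*
	 * A previous VO/VI selection may have budgeted several packets for
	 * one TXOP (see rsi_get_num_pkts_dequeue()); keep draining the same
	 * queue until that budget is used up.
	 */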
	if (common->pkt_cnt != 0) {
		--common->pkt_cnt;
		return common->selected_qnum;
	}

get_queue_num:
	recontend_queue = false;

	q_num = rsi_determine_min_weight_queue(common);

	ii = q_num;

	/* Selecting the queue with least back off */
	for (; ii < NUM_EDCA_QUEUES; ii++) {
		q_len = skb_queue_len(&common->tx_queue[ii]);
		if (((common->tx_qinfo[ii].pkt_contended) &&
		     (common->tx_qinfo[ii].weight < common->min_weight)) &&
		      q_len) {
			common->min_weight = common->tx_qinfo[ii].weight;
			q_num = ii;
		}
	}

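	/*
	 * The selected queue has won this contention round; clearing its
	 * contention flag lets rsi_recalculate_weights() re-arm it with its
	 * full WME weight if it still has packets queued.
	 */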
	if (q_num < NUM_EDCA_QUEUES)
		common->tx_qinfo[q_num].pkt_contended = 0;

	/* Adjust the back off values for all queues again */
	recontend_queue = rsi_recalculate_weights(common);

	q_len = skb_queue_len(&common->tx_queue[q_num]);
	if (!q_len) {
		/* If any queue was freshly contended and the selected queue
		 * does not have any packets, determine the queue number
		 * again with the fresh values.
		 */
		if (recontend_queue)
			goto get_queue_num;

		q_num = INVALID_QUEUE;
		return q_num;
	}

	common->selected_qnum = q_num;
	q_len = skb_queue_len(&common->tx_queue[q_num]);

	if (q_num == VO_Q || q_num == VI_Q) {
		common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
		common->pkt_cnt -= 1;
	}

	return q_num;
}

/**
 * rsi_core_queue_pkt() - This function enqueues the packet to the queue
 *			  specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
static void rsi_core_queue_pkt(struct rsi_common *common,
			       struct sk_buff *skb)
{
	u8 q_num = skb->priority;

	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		dev_kfree_skb(skb);
		return;
	}

	skb_queue_tail(&common->tx_queue[q_num], skb);
}

/**
 * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
 *			    specified by the queue number.
 * @common: Pointer to the driver private structure.
 * @q_num: Queue number.
 *
 * Return: Pointer to sk_buff structure.
 */
static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
					    u8 q_num)
{
	if (q_num >= NUM_SOFT_QUEUES) {
		rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
			__func__, q_num);
		return NULL;
	}

	return skb_dequeue(&common->tx_queue[q_num]);
}

/**
 * rsi_core_qos_processor() - This function determines the WMM queue to
 *			      service based on the backoff procedure. Data
 *			      packets are dequeued from the selected HAL
 *			      queue and sent to the lower layers.
 * @common: Pointer to the driver private structure.
 *
 * Return: None.
 */
void rsi_core_qos_processor(struct rsi_common *common)
{
	struct rsi_hw *adapter = common->priv;
	struct sk_buff *skb;
	unsigned long tstamp_1, tstamp_2;
	u8 q_num;
	int status;

	tstamp_1 = jiffies;
	while (1) {
		q_num = rsi_core_determine_hal_queue(common);
		rsi_dbg(DATA_TX_ZONE,
			"%s: Queue number = %d\n", __func__, q_num);

		if (q_num == INVALID_QUEUE) {
			rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
			break;
		}

		mutex_lock(&common->tx_rxlock);

		status = adapter->check_hw_queue_status(adapter, q_num);
		if (status <= 0) {
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		if ((q_num < MGMT_SOFT_Q) &&
		    ((skb_queue_len(&common->tx_queue[q_num])) <=
		      MIN_DATA_QUEUE_WATER_MARK)) {
			if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
				ieee80211_wake_queue(adapter->hw,
						     WME_AC(q_num));
		}

		skb = rsi_core_dequeue_pkt(common, q_num);
		if (skb == NULL) {
			rsi_dbg(ERR_ZONE, "skb null\n");
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		if (q_num == MGMT_SOFT_Q)
			status = rsi_send_mgmt_pkt(common, skb);
		else
			status = rsi_send_data_pkt(common, skb);

		if (status) {
			mutex_unlock(&common->tx_rxlock);
			break;
		}

		common->tx_stats.total_tx_pkt_send[q_num]++;

		tstamp_2 = jiffies;
		mutex_unlock(&common->tx_rxlock);

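		/*
		 * Yield the processor if this dequeue loop has been running
		 * for more than 300 ms.
		 */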
		if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
			schedule();
	}
}

/**
 * rsi_core_xmit() - This function transmits the packets received from
 *		     mac80211.
 * @common: Pointer to the driver private structure.
 * @skb: Pointer to the socket buffer structure.
 *
 * Return: None.
 */
void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
{
	struct rsi_hw *adapter = common->priv;
	struct ieee80211_tx_info *info;
	struct skb_info *tx_params;
	struct ieee80211_hdr *tmp_hdr = NULL;
	u8 q_num, tid = 0;

	if ((!skb) || (!skb->len)) {
		rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
			__func__);
		goto xmit_fail;
	}
	info = IEEE80211_SKB_CB(skb);
	tx_params = (struct skb_info *)info->driver_data;
	tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];

	if (common->fsm_state != FSM_MAC_INIT_DONE) {
		rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
		goto xmit_fail;
	}

	if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
	    (ieee80211_is_ctl(tmp_hdr->frame_control)) ||
	    (ieee80211_is_qos_nullfunc(tmp_hdr->frame_control))) {
		q_num = MGMT_SOFT_Q;
		skb->priority = q_num;
	} else {
		if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
			tid = (skb->data[24] & IEEE80211_QOS_TID);
			skb->priority = TID_TO_WME_AC(tid);
		} else {
			tid = IEEE80211_NONQOS_TID;
			skb->priority = BE_Q;
		}
		q_num = skb->priority;
		tx_params->tid = tid;
		tx_params->sta_id = 0;
	}

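	/*
	 * If the software queue is about to cross its watermark, stop the
	 * corresponding mac80211 queue; it is woken again from
	 * rsi_core_qos_processor() once the queue drains below
	 * MIN_DATA_QUEUE_WATER_MARK.
	 */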
	if ((q_num != MGMT_SOFT_Q) &&
	    ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
	     DATA_QUEUE_WATER_MARK)) {
		rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
		if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
			ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
		rsi_set_event(&common->tx_thread.event);
		goto xmit_fail;
	}

	rsi_core_queue_pkt(common, skb);
	rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thread <===\n", __func__);
	rsi_set_event(&common->tx_thread.event);

	return;

xmit_fail:
	rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
	/* Dropping pkt here */
	ieee80211_free_txskb(common->priv->hw, skb);
}