1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "rsi_mgmt.h"
18#include "rsi_common.h"
19#include "rsi_hal.h"
20#include "rsi_coex.h"
21
22/**
23 * rsi_determine_min_weight_queue() - This function determines the queue with
24 * the min weight.
25 * @common: Pointer to the driver private structure.
26 *
27 * Return: q_num: Corresponding queue number.
28 */
29static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
30{
31 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
32 u32 q_len = 0;
33 u8 ii = 0;
34
35 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
36 q_len = skb_queue_len(&common->tx_queue[ii]);
37 if ((tx_qinfo[ii].pkt_contended) && q_len) {
38 common->min_weight = tx_qinfo[ii].weight;
39 break;
40 }
41 }
42 return ii;
43}
44
45/**
46 * rsi_recalculate_weights() - This function recalculates the weights
47 * corresponding to each queue.
48 * @common: Pointer to the driver private structure.
49 *
50 * Return: recontend_queue bool variable
51 */
52static bool rsi_recalculate_weights(struct rsi_common *common)
53{
54 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
55 bool recontend_queue = false;
56 u8 ii = 0;
57 u32 q_len = 0;
58
59 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
60 q_len = skb_queue_len(&common->tx_queue[ii]);
61 /* Check for the need of contention */
62 if (q_len) {
63 if (tx_qinfo[ii].pkt_contended) {
64 tx_qinfo[ii].weight =
65 ((tx_qinfo[ii].weight > common->min_weight) ?
66 tx_qinfo[ii].weight - common->min_weight : 0);
67 } else {
68 tx_qinfo[ii].pkt_contended = 1;
69 tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
70 recontend_queue = true;
71 }
72 } else { /* No packets so no contention */
73 tx_qinfo[ii].weight = 0;
74 tx_qinfo[ii].pkt_contended = 0;
75 }
76 }
77
78 return recontend_queue;
79}
80
81/**
82 * rsi_get_num_pkts_dequeue() - This function determines the number of
83 * packets to be dequeued based on the number
84 * of bytes calculated using txop.
85 *
86 * @common: Pointer to the driver private structure.
87 * @q_num: the queue from which pkts have to be dequeued
88 *
89 * Return: pkt_num: Number of pkts to be dequeued.
90 */
91static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
92{
93 struct rsi_hw *adapter = common->priv;
94 struct sk_buff *skb;
95 u32 pkt_cnt = 0;
96 s16 txop = common->tx_qinfo[q_num].txop * 32;
97 __le16 r_txop;
98 struct ieee80211_rate rate;
99 struct ieee80211_hdr *wh;
100 struct ieee80211_vif *vif;
101
102 rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
103 if (q_num == VI_Q)
104 txop = ((txop << 5) / 80);
105
106 if (skb_queue_len(&common->tx_queue[q_num]))
107 skb = skb_peek(&common->tx_queue[q_num]);
108 else
109 return 0;
110
111 do {
112 wh = (struct ieee80211_hdr *)skb->data;
113 vif = rsi_get_vif(adapter, wh->addr2);
114 r_txop = ieee80211_generic_frame_duration(adapter->hw,
115 vif,
116 common->band,
117 skb->len, &rate);
118 txop -= le16_to_cpu(r_txop);
119 pkt_cnt += 1;
120 /*checking if pkts are still there*/
121 if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
122 skb = skb->next;
123 else
124 break;
125
126 } while (txop > 0);
127
128 return pkt_cnt;
129}
130
131/**
132 * rsi_core_determine_hal_queue() - This function determines the queue from
133 * which packet has to be dequeued.
134 * @common: Pointer to the driver private structure.
135 *
136 * Return: q_num: Corresponding queue number on success.
137 */
138static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
139{
140 bool recontend_queue = false;
141 u32 q_len = 0;
142 u8 q_num = INVALID_QUEUE;
143 u8 ii = 0;
144
145 if (skb_queue_len(&common->tx_queue[MGMT_BEACON_Q])) {
146 q_num = MGMT_BEACON_Q;
147 return q_num;
148 }
149 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
150 if (!common->mgmt_q_block)
151 q_num = MGMT_SOFT_Q;
152 return q_num;
153 }
154
155 if (common->hw_data_qs_blocked)
156 return q_num;
157
158 if (common->pkt_cnt != 0) {
159 --common->pkt_cnt;
160 return common->selected_qnum;
161 }
162
163get_queue_num:
164 recontend_queue = false;
165
166 q_num = rsi_determine_min_weight_queue(common);
167
168 ii = q_num;
169
170 /* Selecting the queue with least back off */
171 for (; ii < NUM_EDCA_QUEUES; ii++) {
172 q_len = skb_queue_len(&common->tx_queue[ii]);
173 if (((common->tx_qinfo[ii].pkt_contended) &&
174 (common->tx_qinfo[ii].weight < common->min_weight)) &&
175 q_len) {
176 common->min_weight = common->tx_qinfo[ii].weight;
177 q_num = ii;
178 }
179 }
180
181 if (q_num < NUM_EDCA_QUEUES)
182 common->tx_qinfo[q_num].pkt_contended = 0;
183
184 /* Adjust the back off values for all queues again */
185 recontend_queue = rsi_recalculate_weights(common);
186
187 q_len = skb_queue_len(&common->tx_queue[q_num]);
188 if (!q_len) {
189 /* If any queues are freshly contended and the selected queue
190 * doesn't have any packets
191 * then get the queue number again with fresh values
192 */
193 if (recontend_queue)
194 goto get_queue_num;
195
196 q_num = INVALID_QUEUE;
197 return q_num;
198 }
199
200 common->selected_qnum = q_num;
201 q_len = skb_queue_len(&common->tx_queue[q_num]);
202
203 if (q_num == VO_Q || q_num == VI_Q) {
204 common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
205 common->pkt_cnt -= 1;
206 }
207
208 return q_num;
209}
210
211/**
212 * rsi_core_queue_pkt() - This functions enqueues the packet to the queue
213 * specified by the queue number.
214 * @common: Pointer to the driver private structure.
215 * @skb: Pointer to the socket buffer structure.
216 *
217 * Return: None.
218 */
219static void rsi_core_queue_pkt(struct rsi_common *common,
220 struct sk_buff *skb)
221{
222 u8 q_num = skb->priority;
223 if (q_num >= NUM_SOFT_QUEUES) {
224 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
225 __func__, q_num);
226 dev_kfree_skb(skb);
227 return;
228 }
229
230 skb_queue_tail(&common->tx_queue[q_num], skb);
231}
232
233/**
234 * rsi_core_dequeue_pkt() - This functions dequeues the packet from the queue
235 * specified by the queue number.
236 * @common: Pointer to the driver private structure.
237 * @q_num: Queue number.
238 *
239 * Return: Pointer to sk_buff structure.
240 */
241static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
242 u8 q_num)
243{
244 if (q_num >= NUM_SOFT_QUEUES) {
245 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
246 __func__, q_num);
247 return NULL;
248 }
249
250 return skb_dequeue(&common->tx_queue[q_num]);
251}
252
253/**
254 * rsi_core_qos_processor() - This function is used to determine the wmm queue
255 * based on the backoff procedure. Data packets are
256 * dequeued from the selected hal queue and sent to
257 * the below layers.
258 * @common: Pointer to the driver private structure.
259 *
260 * Return: None.
261 */
262void rsi_core_qos_processor(struct rsi_common *common)
263{
264 struct rsi_hw *adapter = common->priv;
265 struct sk_buff *skb;
266 unsigned long tstamp_1, tstamp_2;
267 u8 q_num;
268 int status;
269
270 tstamp_1 = jiffies;
271 while (1) {
272 q_num = rsi_core_determine_hal_queue(common);
273 rsi_dbg(DATA_TX_ZONE,
274 "%s: Queue number = %d\n", __func__, q_num);
275
276 if (q_num == INVALID_QUEUE) {
277 rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
278 break;
279 }
280 if (common->hibernate_resume)
281 break;
282
283 mutex_lock(&common->tx_lock);
284
285 status = adapter->check_hw_queue_status(adapter, q_num);
286 if ((status <= 0)) {
287 mutex_unlock(&common->tx_lock);
288 break;
289 }
290
291 if ((q_num < MGMT_SOFT_Q) &&
292 ((skb_queue_len(&common->tx_queue[q_num])) <=
293 MIN_DATA_QUEUE_WATER_MARK)) {
294 if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
295 ieee80211_wake_queue(adapter->hw,
296 WME_AC(q_num));
297 }
298
299 skb = rsi_core_dequeue_pkt(common, q_num);
300 if (skb == NULL) {
301 rsi_dbg(ERR_ZONE, "skb null\n");
302 mutex_unlock(&common->tx_lock);
303 break;
304 }
305 if (q_num == MGMT_BEACON_Q) {
306 status = rsi_send_pkt_to_bus(common, skb);
307 dev_kfree_skb(skb);
308 } else {
309#ifdef CONFIG_RSI_COEX
310 if (common->coex_mode > 1) {
311 status = rsi_coex_send_pkt(common, skb,
312 RSI_WLAN_Q);
313 } else {
314#endif
315 if (q_num == MGMT_SOFT_Q)
316 status = rsi_send_mgmt_pkt(common, skb);
317 else
318 status = rsi_send_data_pkt(common, skb);
319#ifdef CONFIG_RSI_COEX
320 }
321#endif
322 }
323
324 if (status) {
325 mutex_unlock(&common->tx_lock);
326 break;
327 }
328
329 common->tx_stats.total_tx_pkt_send[q_num]++;
330
331 tstamp_2 = jiffies;
332 mutex_unlock(&common->tx_lock);
333
334 if (time_after(tstamp_2, tstamp_1 + (300 * HZ) / 1000))
335 schedule();
336 }
337}
338
339struct rsi_sta *rsi_find_sta(struct rsi_common *common, u8 *mac_addr)
340{
341 int i;
342
343 for (i = 0; i < common->max_stations; i++) {
344 if (!common->stations[i].sta)
345 continue;
346 if (!(memcmp(common->stations[i].sta->addr,
347 mac_addr, ETH_ALEN)))
348 return &common->stations[i];
349 }
350 return NULL;
351}
352
353struct ieee80211_vif *rsi_get_vif(struct rsi_hw *adapter, u8 *mac)
354{
355 struct ieee80211_vif *vif;
356 int i;
357
358 for (i = 0; i < RSI_MAX_VIFS; i++) {
359 vif = adapter->vifs[i];
360 if (!vif)
361 continue;
362 if (!memcmp(vif->addr, mac, ETH_ALEN))
363 return vif;
364 }
365 return NULL;
366}
367
368/**
369 * rsi_core_xmit() - This function transmits the packets received from mac80211
370 * @common: Pointer to the driver private structure.
371 * @skb: Pointer to the socket buffer structure.
372 *
373 * Return: None.
374 */
375void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
376{
377 struct rsi_hw *adapter = common->priv;
378 struct ieee80211_tx_info *info;
379 struct skb_info *tx_params;
380 struct ieee80211_hdr *wh = NULL;
381 struct ieee80211_vif *vif;
382 u8 q_num, tid = 0;
383 struct rsi_sta *rsta = NULL;
384
385 if ((!skb) || (!skb->len)) {
386 rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
387 __func__);
388 goto xmit_fail;
389 }
390 if (common->fsm_state != FSM_MAC_INIT_DONE) {
391 rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
392 goto xmit_fail;
393 }
394 if (common->wow_flags & RSI_WOW_ENABLED) {
395 rsi_dbg(ERR_ZONE,
396 "%s: Blocking Tx_packets when WOWLAN is enabled\n",
397 __func__);
398 goto xmit_fail;
399 }
400
401 info = IEEE80211_SKB_CB(skb);
402 tx_params = (struct skb_info *)info->driver_data;
403 wh = (struct ieee80211_hdr *)&skb->data[0];
404 tx_params->sta_id = 0;
405
406 vif = rsi_get_vif(adapter, wh->addr2);
407 if (!vif)
408 goto xmit_fail;
409 tx_params->vif = vif;
410 tx_params->vap_id = ((struct vif_priv *)vif->drv_priv)->vap_id;
411 if ((ieee80211_is_mgmt(wh->frame_control)) ||
412 (ieee80211_is_ctl(wh->frame_control)) ||
413 (ieee80211_is_qos_nullfunc(wh->frame_control))) {
414 q_num = MGMT_SOFT_Q;
415 skb->priority = q_num;
416 } else {
417 if (ieee80211_is_data_qos(wh->frame_control)) {
418 tid = (skb->data[24] & IEEE80211_QOS_TID);
419 skb->priority = TID_TO_WME_AC(tid);
420 } else {
421 tid = IEEE80211_NONQOS_TID;
422 skb->priority = BE_Q;
423 }
424
425 q_num = skb->priority;
426 tx_params->tid = tid;
427
428 if (((vif->type == NL80211_IFTYPE_AP) ||
429 (vif->type == NL80211_IFTYPE_P2P_GO)) &&
430 (!is_broadcast_ether_addr(wh->addr1)) &&
431 (!is_multicast_ether_addr(wh->addr1))) {
432 rsta = rsi_find_sta(common, wh->addr1);
433 if (!rsta)
434 goto xmit_fail;
435 tx_params->sta_id = rsta->sta_id;
436 }
437
438 if (rsta) {
439 /* Start aggregation if not done for this tid */
440 if (!rsta->start_tx_aggr[tid]) {
441 rsta->start_tx_aggr[tid] = true;
442 ieee80211_start_tx_ba_session(rsta->sta,
443 tid, 0);
444 }
445 }
446 }
447
448 if ((q_num < MGMT_SOFT_Q) &&
449 ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
450 DATA_QUEUE_WATER_MARK)) {
451 rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
452 if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
453 ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
454 rsi_set_event(&common->tx_thread.event);
455 goto xmit_fail;
456 }
457
458 rsi_core_queue_pkt(common, skb);
459 rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thead <===\n", __func__);
460 rsi_set_event(&common->tx_thread.event);
461
462 return;
463
464xmit_fail:
465 rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
466 /* Dropping pkt here */
467 ieee80211_free_txskb(common->priv->hw, skb);
468}
1/**
2 * Copyright (c) 2014 Redpine Signals Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "rsi_mgmt.h"
18#include "rsi_common.h"
19
20/**
21 * rsi_determine_min_weight_queue() - This function determines the queue with
22 * the min weight.
23 * @common: Pointer to the driver private structure.
24 *
25 * Return: q_num: Corresponding queue number.
26 */
27static u8 rsi_determine_min_weight_queue(struct rsi_common *common)
28{
29 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
30 u32 q_len = 0;
31 u8 ii = 0;
32
33 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
34 q_len = skb_queue_len(&common->tx_queue[ii]);
35 if ((tx_qinfo[ii].pkt_contended) && q_len) {
36 common->min_weight = tx_qinfo[ii].weight;
37 break;
38 }
39 }
40 return ii;
41}
42
43/**
44 * rsi_recalculate_weights() - This function recalculates the weights
45 * corresponding to each queue.
46 * @common: Pointer to the driver private structure.
47 *
48 * Return: recontend_queue bool variable
49 */
50static bool rsi_recalculate_weights(struct rsi_common *common)
51{
52 struct wmm_qinfo *tx_qinfo = common->tx_qinfo;
53 bool recontend_queue = false;
54 u8 ii = 0;
55 u32 q_len = 0;
56
57 for (ii = 0; ii < NUM_EDCA_QUEUES; ii++) {
58 q_len = skb_queue_len(&common->tx_queue[ii]);
59 /* Check for the need of contention */
60 if (q_len) {
61 if (tx_qinfo[ii].pkt_contended) {
62 tx_qinfo[ii].weight =
63 ((tx_qinfo[ii].weight > common->min_weight) ?
64 tx_qinfo[ii].weight - common->min_weight : 0);
65 } else {
66 tx_qinfo[ii].pkt_contended = 1;
67 tx_qinfo[ii].weight = tx_qinfo[ii].wme_params;
68 recontend_queue = true;
69 }
70 } else { /* No packets so no contention */
71 tx_qinfo[ii].weight = 0;
72 tx_qinfo[ii].pkt_contended = 0;
73 }
74 }
75
76 return recontend_queue;
77}
78
79/**
80 * rsi_get_num_pkts_dequeue() - This function determines the number of
81 * packets to be dequeued based on the number
82 * of bytes calculated using txop.
83 *
84 * @common: Pointer to the driver private structure.
85 * @q_num: the queue from which pkts have to be dequeued
86 *
87 * Return: pkt_num: Number of pkts to be dequeued.
88 */
89static u32 rsi_get_num_pkts_dequeue(struct rsi_common *common, u8 q_num)
90{
91 struct rsi_hw *adapter = common->priv;
92 struct sk_buff *skb;
93 u32 pkt_cnt = 0;
94 s16 txop = common->tx_qinfo[q_num].txop * 32;
95 __le16 r_txop;
96 struct ieee80211_rate rate;
97
98 rate.bitrate = RSI_RATE_MCS0 * 5 * 10; /* Convert to Kbps */
99 if (q_num == VI_Q)
100 txop = ((txop << 5) / 80);
101
102 if (skb_queue_len(&common->tx_queue[q_num]))
103 skb = skb_peek(&common->tx_queue[q_num]);
104 else
105 return 0;
106
107 do {
108 r_txop = ieee80211_generic_frame_duration(adapter->hw,
109 adapter->vifs[0],
110 common->band,
111 skb->len, &rate);
112 txop -= le16_to_cpu(r_txop);
113 pkt_cnt += 1;
114 /*checking if pkts are still there*/
115 if (skb_queue_len(&common->tx_queue[q_num]) - pkt_cnt)
116 skb = skb->next;
117 else
118 break;
119
120 } while (txop > 0);
121
122 return pkt_cnt;
123}
124
125/**
126 * rsi_core_determine_hal_queue() - This function determines the queue from
127 * which packet has to be dequeued.
128 * @common: Pointer to the driver private structure.
129 *
130 * Return: q_num: Corresponding queue number on success.
131 */
132static u8 rsi_core_determine_hal_queue(struct rsi_common *common)
133{
134 bool recontend_queue = false;
135 u32 q_len = 0;
136 u8 q_num = INVALID_QUEUE;
137 u8 ii = 0;
138
139 if (skb_queue_len(&common->tx_queue[MGMT_SOFT_Q])) {
140 if (!common->mgmt_q_block)
141 q_num = MGMT_SOFT_Q;
142 return q_num;
143 }
144
145 if (common->hw_data_qs_blocked)
146 return q_num;
147
148 if (common->pkt_cnt != 0) {
149 --common->pkt_cnt;
150 return common->selected_qnum;
151 }
152
153get_queue_num:
154 recontend_queue = false;
155
156 q_num = rsi_determine_min_weight_queue(common);
157
158 ii = q_num;
159
160 /* Selecting the queue with least back off */
161 for (; ii < NUM_EDCA_QUEUES; ii++) {
162 q_len = skb_queue_len(&common->tx_queue[ii]);
163 if (((common->tx_qinfo[ii].pkt_contended) &&
164 (common->tx_qinfo[ii].weight < common->min_weight)) &&
165 q_len) {
166 common->min_weight = common->tx_qinfo[ii].weight;
167 q_num = ii;
168 }
169 }
170
171 if (q_num < NUM_EDCA_QUEUES)
172 common->tx_qinfo[q_num].pkt_contended = 0;
173
174 /* Adjust the back off values for all queues again */
175 recontend_queue = rsi_recalculate_weights(common);
176
177 q_len = skb_queue_len(&common->tx_queue[q_num]);
178 if (!q_len) {
179 /* If any queues are freshly contended and the selected queue
180 * doesn't have any packets
181 * then get the queue number again with fresh values
182 */
183 if (recontend_queue)
184 goto get_queue_num;
185
186 q_num = INVALID_QUEUE;
187 return q_num;
188 }
189
190 common->selected_qnum = q_num;
191 q_len = skb_queue_len(&common->tx_queue[q_num]);
192
193 if (q_num == VO_Q || q_num == VI_Q) {
194 common->pkt_cnt = rsi_get_num_pkts_dequeue(common, q_num);
195 common->pkt_cnt -= 1;
196 }
197
198 return q_num;
199}
200
201/**
202 * rsi_core_queue_pkt() - This functions enqueues the packet to the queue
203 * specified by the queue number.
204 * @common: Pointer to the driver private structure.
205 * @skb: Pointer to the socket buffer structure.
206 *
207 * Return: None.
208 */
209static void rsi_core_queue_pkt(struct rsi_common *common,
210 struct sk_buff *skb)
211{
212 u8 q_num = skb->priority;
213 if (q_num >= NUM_SOFT_QUEUES) {
214 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
215 __func__, q_num);
216 dev_kfree_skb(skb);
217 return;
218 }
219
220 skb_queue_tail(&common->tx_queue[q_num], skb);
221}
222
223/**
224 * rsi_core_dequeue_pkt() - This functions dequeues the packet from the queue
225 * specified by the queue number.
226 * @common: Pointer to the driver private structure.
227 * @q_num: Queue number.
228 *
229 * Return: Pointer to sk_buff structure.
230 */
231static struct sk_buff *rsi_core_dequeue_pkt(struct rsi_common *common,
232 u8 q_num)
233{
234 if (q_num >= NUM_SOFT_QUEUES) {
235 rsi_dbg(ERR_ZONE, "%s: Invalid Queue Number: q_num = %d\n",
236 __func__, q_num);
237 return NULL;
238 }
239
240 return skb_dequeue(&common->tx_queue[q_num]);
241}
242
243/**
244 * rsi_core_qos_processor() - This function is used to determine the wmm queue
245 * based on the backoff procedure. Data packets are
246 * dequeued from the selected hal queue and sent to
247 * the below layers.
248 * @common: Pointer to the driver private structure.
249 *
250 * Return: None.
251 */
252void rsi_core_qos_processor(struct rsi_common *common)
253{
254 struct rsi_hw *adapter = common->priv;
255 struct sk_buff *skb;
256 unsigned long tstamp_1, tstamp_2;
257 u8 q_num;
258 int status;
259
260 tstamp_1 = jiffies;
261 while (1) {
262 q_num = rsi_core_determine_hal_queue(common);
263 rsi_dbg(DATA_TX_ZONE,
264 "%s: Queue number = %d\n", __func__, q_num);
265
266 if (q_num == INVALID_QUEUE) {
267 rsi_dbg(DATA_TX_ZONE, "%s: No More Pkt\n", __func__);
268 break;
269 }
270
271 mutex_lock(&common->tx_rxlock);
272
273 status = adapter->check_hw_queue_status(adapter, q_num);
274 if ((status <= 0)) {
275 mutex_unlock(&common->tx_rxlock);
276 break;
277 }
278
279 if ((q_num < MGMT_SOFT_Q) &&
280 ((skb_queue_len(&common->tx_queue[q_num])) <=
281 MIN_DATA_QUEUE_WATER_MARK)) {
282 if (ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
283 ieee80211_wake_queue(adapter->hw,
284 WME_AC(q_num));
285 }
286
287 skb = rsi_core_dequeue_pkt(common, q_num);
288 if (skb == NULL) {
289 rsi_dbg(ERR_ZONE, "skb null\n");
290 mutex_unlock(&common->tx_rxlock);
291 break;
292 }
293
294 if (q_num == MGMT_SOFT_Q)
295 status = rsi_send_mgmt_pkt(common, skb);
296 else
297 status = rsi_send_data_pkt(common, skb);
298
299 if (status) {
300 mutex_unlock(&common->tx_rxlock);
301 break;
302 }
303
304 common->tx_stats.total_tx_pkt_send[q_num]++;
305
306 tstamp_2 = jiffies;
307 mutex_unlock(&common->tx_rxlock);
308
309 if (tstamp_2 > tstamp_1 + (300 * HZ / 1000))
310 schedule();
311 }
312}
313
314/**
315 * rsi_core_xmit() - This function transmits the packets received from mac80211
316 * @common: Pointer to the driver private structure.
317 * @skb: Pointer to the socket buffer structure.
318 *
319 * Return: None.
320 */
321void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
322{
323 struct rsi_hw *adapter = common->priv;
324 struct ieee80211_tx_info *info;
325 struct skb_info *tx_params;
326 struct ieee80211_hdr *tmp_hdr = NULL;
327 u8 q_num, tid = 0;
328
329 if ((!skb) || (!skb->len)) {
330 rsi_dbg(ERR_ZONE, "%s: Null skb/zero Length packet\n",
331 __func__);
332 goto xmit_fail;
333 }
334 info = IEEE80211_SKB_CB(skb);
335 tx_params = (struct skb_info *)info->driver_data;
336 tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
337
338 if (common->fsm_state != FSM_MAC_INIT_DONE) {
339 rsi_dbg(ERR_ZONE, "%s: FSM state not open\n", __func__);
340 goto xmit_fail;
341 }
342
343 if ((ieee80211_is_mgmt(tmp_hdr->frame_control)) ||
344 (ieee80211_is_ctl(tmp_hdr->frame_control)) ||
345 (ieee80211_is_qos_nullfunc(tmp_hdr->frame_control))) {
346 q_num = MGMT_SOFT_Q;
347 skb->priority = q_num;
348 } else {
349 if (ieee80211_is_data_qos(tmp_hdr->frame_control)) {
350 tid = (skb->data[24] & IEEE80211_QOS_TID);
351 skb->priority = TID_TO_WME_AC(tid);
352 } else {
353 tid = IEEE80211_NONQOS_TID;
354 skb->priority = BE_Q;
355 }
356 q_num = skb->priority;
357 tx_params->tid = tid;
358 tx_params->sta_id = 0;
359 }
360
361 if ((q_num != MGMT_SOFT_Q) &&
362 ((skb_queue_len(&common->tx_queue[q_num]) + 1) >=
363 DATA_QUEUE_WATER_MARK)) {
364 rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__);
365 if (!ieee80211_queue_stopped(adapter->hw, WME_AC(q_num)))
366 ieee80211_stop_queue(adapter->hw, WME_AC(q_num));
367 rsi_set_event(&common->tx_thread.event);
368 goto xmit_fail;
369 }
370
371 rsi_core_queue_pkt(common, skb);
372 rsi_dbg(DATA_TX_ZONE, "%s: ===> Scheduling TX thead <===\n", __func__);
373 rsi_set_event(&common->tx_thread.event);
374
375 return;
376
377xmit_fail:
378 rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__);
379 /* Dropping pkt here */
380 ieee80211_free_txskb(common->priv->hw, skb);
381}