// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP Wireless LAN device driver: WMM
 *
 * Copyright 2011-2020 NXP
 */

#include "decl.h"
#include "ioctl.h"
#include "util.h"
#include "fw.h"
#include "main.h"
#include "wmm.h"
#include "11n.h"


/* Maximum value FW can accept for driver delay in packet transmission */
#define DRV_PKT_DELAY_TO_FW_MAX   512


#define WMM_QUEUED_PACKET_LOWER_LIMIT   180

#define WMM_QUEUED_PACKET_UPPER_LIMIT   200

/* Offset for TOS field in the IP header */
#define IPTOS_OFFSET 5

static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);

/* This table inverts the tos_to_tid operation to get a priority
 * which is in sequential order, and can be compared.
 * Use this to compare the priority of two different TIDs.
 */
const u8 tos_to_tid_inv[] = {
	0x02,  /* from tos_to_tid[2] = 0 */
	0x00,  /* from tos_to_tid[0] = 1 */
	0x01,  /* from tos_to_tid[1] = 2 */
	0x03,
	0x04,
	0x05,
	0x06,
	0x07
};
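
/*
 * Editor's example (illustrative only): to decide which of two TIDs has
 * the higher priority, compare the inverted values rather than the raw
 * TIDs. tos_to_tid_inv[0] == 0x02 while tos_to_tid_inv[1] == 0x00, so
 * TID 0 outranks TID 1 even though 0 < 1 numerically.
 */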

/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
	0x00, 0x50, 0xf2, 0x02,
	0x00, 0x01, 0x00
};

static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
	WMM_AC_BK,
	WMM_AC_VI,
	WMM_AC_VO
};

static u8 tos_to_tid[] = {
	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
	0x01,			/* 0 1 0 AC_BK */
	0x02,			/* 0 0 0 AC_BK */
	0x00,			/* 0 0 1 AC_BE */
	0x03,			/* 0 1 1 AC_BE */
	0x04,			/* 1 0 0 AC_VI */
	0x05,			/* 1 0 1 AC_VI */
	0x06,			/* 1 1 0 AC_VO */
	0x07			/* 1 1 1 AC_VO */
};

static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };

/*
 * This function debug prints the priority parameters for a WMM AC.
 */
static void
mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
{
	const char *ac_str[] = { "BK", "BE", "VI", "VO" };

	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
					     & MWIFIEX_ACI) >> 5]],
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
		 le16_to_cpu(ac_param->tx_op_limit));
}

/*
 * This function allocates a receiver address (RA) list node.
 *
 * The function also initializes the node with the provided RA.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
	if (!ra_list)
		return NULL;

	INIT_LIST_HEAD(&ra_list->list);
	skb_queue_head_init(&ra_list->skb_head);

	memcpy(ra_list->ra, ra, ETH_ALEN);

	ra_list->total_pkt_count = 0;

	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);

	return ra_list;
}

/* This function returns a random number between 16 and 32 to be used as
 * the threshold for the number of packets after which BA setup is
 * initiated.
 */
static u8 mwifiex_get_random_ba_threshold(void)
{
	u64 ns;
	/* setup ba_packet_threshold here random number between
	 * [BA_SETUP_PACKET_OFFSET,
	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
	 */
	ns = ktime_get_ns();
	ns += (ns >> 32) + (ns >> 16);

	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
}
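
/*
 * Worked example (editor's sketch, assuming BA_SETUP_PACKET_OFFSET == 16
 * and BA_SETUP_MAX_PACKET_THRESHOLD == 16, as defined elsewhere in this
 * driver): if the folded timestamp byte works out to 42, the threshold is
 * (42 % 16) + 16 == 26, i.e. BA setup is attempted once roughly 26
 * packets have been queued for the RA.
 */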

/*
 * This function allocates and adds an RA list for all TIDs
 * with the given RA.
 */
void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
{
	int i;
	struct mwifiex_ra_list_tbl *ra_list;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_sta_node *node;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
		mwifiex_dbg(adapter, INFO,
			    "info: created ra_list %p\n", ra_list);

		if (!ra_list)
			break;

		ra_list->is_11n_enabled = 0;
		ra_list->tdls_link = false;
		ra_list->ba_status = BA_SETUP_NONE;
		ra_list->amsdu_in_ampdu = false;
		if (!mwifiex_queuing_ra_based(priv)) {
			if (mwifiex_is_tdls_link_setup
				(mwifiex_get_tdls_link_status(priv, ra))) {
				ra_list->tdls_link = true;
				ra_list->is_11n_enabled =
					mwifiex_tdls_peer_11n_enabled(priv, ra);
			} else {
				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
			}
		} else {
			spin_lock_bh(&priv->sta_list_spinlock);
			node = mwifiex_get_sta_entry(priv, ra);
			if (node)
				ra_list->tx_paused = node->tx_pause;
			ra_list->is_11n_enabled =
				      mwifiex_is_sta_11n_enabled(priv, node);
			if (ra_list->is_11n_enabled)
				ra_list->max_amsdu = node->max_amsdu;
			spin_unlock_bh(&priv->sta_list_spinlock);
		}

		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
			    ra_list, ra_list->is_11n_enabled);

		if (ra_list->is_11n_enabled) {
			ra_list->ba_pkt_count = 0;
			ra_list->ba_packet_thr =
					      mwifiex_get_random_ba_threshold();
		}
		list_add_tail(&ra_list->list,
			      &priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

/*
 * This function sets the WMM queue priorities to their default values.
 */
static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
{
	/* Default queue priorities: VO->VI->BE->BK */
	priv->wmm.queue_priority[0] = WMM_AC_VO;
	priv->wmm.queue_priority[1] = WMM_AC_VI;
	priv->wmm.queue_priority[2] = WMM_AC_BE;
	priv->wmm.queue_priority[3] = WMM_AC_BK;
}

/*
 * This function maps ACs to TIDs.
 */
static void
mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
	struct mwifiex_wmm_desc *wmm = &priv->wmm;
	u8 *queue_priority = wmm->queue_priority;
	int i;

	for (i = 0; i < 4; ++i) {
		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
	}

	for (i = 0; i < MAX_NUM_TID; ++i)
		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;

	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
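
/*
 * Example trace (editor's sketch, assuming the usual enum values
 * WMM_AC_BK == 0 through WMM_AC_VO == 3): with the default
 * queue_priority {VO, VI, BE, BK}, iteration i == 0 writes
 * ac_to_tid[WMM_AC_VO] == {6, 7} into tos_to_tid[6..7], i == 1 writes
 * {4, 5} into tos_to_tid[4..5], and so on, reproducing the default
 * table {1, 2, 0, 3, 4, 5, 6, 7}.
 */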

/*
 * This function initializes WMM priority queues.
 */
void
mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
				   struct ieee_types_wmm_parameter *wmm_ie)
{
	u16 cw_min, avg_back_off, tmp[4];
	u32 i, j, num_ac;
	u8 ac_idx;

	if (!wmm_ie || !priv->wmm_enabled) {
		/* WMM is not enabled, just set the defaults and return */
		mwifiex_wmm_default_queue_priorities(priv);
		return;
	}

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM Parameter IE: version=%d,\t"
		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
		    wmm_ie->version, wmm_ie->qos_info_bitmap &
		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
		    wmm_ie->reserved);

	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);

		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
		priv->wmm.queue_priority[ac_idx] = ac_idx;
		tmp[ac_idx] = avg_back_off;

		mwifiex_dbg(priv->adapter, INFO,
			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
			    cw_min, avg_back_off);
		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
	}

	/* Bubble sort */
	for (i = 0; i < num_ac; i++) {
		for (j = 1; j < num_ac - i; j++) {
			if (tmp[j - 1] > tmp[j]) {
				swap(tmp[j - 1], tmp[j]);
				swap(priv->wmm.queue_priority[j - 1],
				     priv->wmm.queue_priority[j]);
			} else if (tmp[j - 1] == tmp[j]) {
				if (priv->wmm.queue_priority[j - 1]
				    < priv->wmm.queue_priority[j])
					swap(priv->wmm.queue_priority[j - 1],
					     priv->wmm.queue_priority[j]);
			}
		}
	}

	mwifiex_wmm_queue_priorities_tid(priv);
}
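
/*
 * Worked example for the back-off estimate above: an AC advertising
 * ECWmin == 4 and AIFSN == 3 yields cw_min == (1 << 4) - 1 == 15 and
 * avg_back_off == (15 >> 1) + 3 == 10. The bubble sort then orders the
 * ACs by these values, so the AC with the smallest average back-off
 * (statistically the fastest channel access) ends up first in
 * queue_priority.
 */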

/*
 * This function evaluates whether or not an AC is to be downgraded.
 *
 * In case the AC is not enabled, the highest AC is returned that is
 * enabled and does not require admission control.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
			      enum mwifiex_wmm_ac_e eval_ac)
{
	int down_ac;
	enum mwifiex_wmm_ac_e ret_ac;
	struct mwifiex_wmm_ac_status *ac_status;

	ac_status = &priv->wmm.ac_status[eval_ac];

	if (!ac_status->disabled)
		/* Okay to use this AC, it's enabled */
		return eval_ac;

	/* Setup a default return value of the lowest priority */
	ret_ac = WMM_AC_BK;

	/*
	 *  Find the highest AC that is enabled and does not require
	 *  admission control. The spec disallows downgrading to an AC,
	 *  which is enabled due to a completed admission control.
	 *  Unadmitted traffic is not to be sent on an AC with admitted
	 *  traffic.
	 */
	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
		ac_status = &priv->wmm.ac_status[down_ac];

		if (!ac_status->disabled && !ac_status->flow_required)
			/* AC is enabled and does not require admission
			   control */
			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
	}

	return ret_ac;
}
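
/*
 * Example (hypothetical AC state): if VO is disabled, VI is enabled but
 * has flow_required set (ACM), and BE/BK are enabled without admission
 * control, then evaluating VO walks BK -> BE -> VI and settles on BE:
 * the highest enabled AC below VO that does not require admission
 * control.
 */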

/*
 * This function downgrades the WMM priority queues.
 */
void
mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
{
	int ac_val;

	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
		    "BK(0), BE(1), VI(2), VO(3)\n");

	if (!priv->wmm_enabled) {
		/* WMM is not enabled, default priorities */
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
			priv->wmm.ac_down_graded_vals[ac_val] =
						(enum mwifiex_wmm_ac_e) ac_val;
	} else {
		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
			priv->wmm.ac_down_graded_vals[ac_val]
				= mwifiex_wmm_eval_downgrade_ac(priv,
						(enum mwifiex_wmm_ac_e) ac_val);
			mwifiex_dbg(priv->adapter, INFO,
				    "info: WMM: AC PRIO %d maps to %d\n",
				    ac_val,
				    priv->wmm.ac_down_graded_vals[ac_val]);
		}
	}
}

/*
 * This function converts the IP TOS field to a WMM AC
 * queue assignment.
 */
static enum mwifiex_wmm_ac_e
mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
{
	/* Map of TOS UP values to WMM AC */
	static const enum mwifiex_wmm_ac_e tos_to_ac[] = {
		WMM_AC_BE,
		WMM_AC_BK,
		WMM_AC_BK,
		WMM_AC_BE,
		WMM_AC_VI,
		WMM_AC_VI,
		WMM_AC_VO,
		WMM_AC_VO
	};

	if (tos >= ARRAY_SIZE(tos_to_ac))
		return WMM_AC_BE;

	return tos_to_ac[tos];
}

/*
 * This function evaluates a given TID and downgrades it to a lower
 * TID if the WMM Parameter IE received from the AP indicates that the
 * corresponding AC is disabled (due to call admission control, i.e. the
 * ACM bit). Mapping of TID to AC is taken care of internally.
 */
u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
	enum mwifiex_wmm_ac_e ac, ac_down;
	u8 new_tid;

	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
	ac_down = priv->wmm.ac_down_graded_vals[ac];

	/* Send the index into the tid array; picking from the array is
	 * taken care of by the dequeuing function
	 */
	new_tid = ac_to_tid[ac_down][tid % 2];

	return new_tid;
}
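
/*
 * Example (editor's sketch, assuming WMM_AC_BE == 1): a voice frame with
 * tid == 6 maps to WMM_AC_VO; if VO has been downgraded to WMM_AC_BE
 * (see above), new_tid becomes ac_to_tid[1][6 % 2] == ac_to_tid[1][0]
 * == 0, i.e. the frame is queued on TID 0 instead.
 */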

/*
 * This function initializes the WMM state information and the
 * WMM data path queues.
 */
void
mwifiex_wmm_init(struct mwifiex_adapter *adapter)
{
	int i, j;
	struct mwifiex_private *priv;

	for (j = 0; j < adapter->priv_num; ++j) {
		priv = adapter->priv[j];
		if (!priv)
			continue;

		for (i = 0; i < MAX_NUM_TID; ++i) {
			if (!disable_tx_amsdu &&
			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
				priv->aggr_prio_tbl[i].amsdu =
							priv->tos_to_tid_inv[i];
			else
				priv->aggr_prio_tbl[i].amsdu =
							BA_STREAM_NOT_ALLOWED;
			priv->aggr_prio_tbl[i].ampdu_ap =
							priv->tos_to_tid_inv[i];
			priv->aggr_prio_tbl[i].ampdu_user =
							priv->tos_to_tid_inv[i];
		}

		priv->aggr_prio_tbl[6].amsdu
					= priv->aggr_prio_tbl[6].ampdu_ap
					= priv->aggr_prio_tbl[6].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
					= priv->aggr_prio_tbl[7].ampdu_user
					= BA_STREAM_NOT_ALLOWED;

		mwifiex_set_ba_params(priv);
		mwifiex_reset_11n_rx_seq_num(priv);

		priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
		atomic_set(&priv->wmm.tx_pkts_queued, 0);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
}

int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
{
	struct mwifiex_private *priv;
	int i;

	for (i = 0; i < adapter->priv_num; i++) {
		priv = adapter->priv[i];

		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (!skb_queue_empty(&priv->bypass_txq))
			return false;
	}

	return true;
}

/*
 * This function checks if the WMM Tx queues are empty.
 */
int
mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
{
	int i;
	struct mwifiex_private *priv;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];

		if (!priv->port_open &&
		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
			continue;
		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;
		if (atomic_read(&priv->wmm.tx_pkts_queued))
			return false;
	}

	return true;
}

/*
 * This function deletes all packets in an RA list node.
 *
 * The packet send completion callback handler is called with failure
 * status for each packet after it is dequeued, to ensure proper
 * cleanup.
 */
static void
mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ra_list)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
		skb_unlink(skb, &ra_list->skb_head);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
	}
}

/*
 * This function deletes all packets in an RA list.
 *
 * Each node in the RA list is walked individually and all packets
 * queued on it are freed.
 */
static void
mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
			       struct list_head *ra_list_head)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, ra_list_head, list)
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
}

/*
 * This function deletes all packets in all RA lists.
 */
static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
{
	int i;

	for (i = 0; i < MAX_NUM_TID; i++)
		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
								       ra_list);

	atomic_set(&priv->wmm.tx_pkts_queued, 0);
	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
}

/*
 * This function deletes all receiver addresses from all RA lists.
 */
static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
{
	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
	int i;

	for (i = 0; i < MAX_NUM_TID; ++i) {
		mwifiex_dbg(priv->adapter, INFO,
			    "info: ra_list: freeing buf for tid %d\n", i);
		list_for_each_entry_safe(ra_list, tmp_node,
					 &priv->wmm.tid_tbl_ptr[i].ra_list,
					 list) {
			list_del(&ra_list->list);
			kfree(ra_list);
		}

		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
	}
}

static int mwifiex_free_ack_frame(int id, void *p, void *data)
{
	pr_warn("Have pending ack frames!\n");
	kfree_skb(p);
	return 0;
}

/*
 * This function cleans up the Tx and Rx queues.
 *
 * Cleanup includes -
 *      - All packets in RA lists
 *      - All entries in Rx reorder table
 *      - All entries in Tx BA stream table
 *      - MPA buffer (if required)
 *      - All RA lists
 */
void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
	struct sk_buff *skb, *tmp;

	mwifiex_11n_cleanup_reorder_tbl(priv);
	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	mwifiex_wmm_cleanup_queues(priv);
	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);

	if (priv->adapter->if_ops.cleanup_mpa_buf)
		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);

	mwifiex_wmm_delete_all_ralist(priv);
	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));

	if (priv->adapter->if_ops.clean_pcie_ring &&
	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
		skb_unlink(skb, &priv->tdls_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}

	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
		skb_unlink(skb, &priv->bypass_txq);
		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
	}
	atomic_set(&priv->adapter->bypass_tx_pending, 0);

	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
	idr_destroy(&priv->ack_status_frames);
}

/*
 * This function retrieves a particular RA list node, matching with the
 * given TID and RA address.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
			    list) {
		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
			return ra_list;
	}

	return NULL;
}

void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
				    u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
		if (ra_list && ra_list->tx_paused != tx_pause) {
			pkt_cnt += ra_list->total_pkt_count;
			ra_list->tx_paused = tx_pause;
			if (tx_pause)
				priv->wmm.pkts_paused[i] +=
					ra_list->total_pkt_count;
			else
				priv->wmm.pkts_paused[i] -=
					ra_list->total_pkt_count;
		}
	}

	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/* This function updates the tx_pause state of all non-TDLS peer RA lists
 * while TDLS channel switching is in progress.
 */
void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
					       u8 *mac, u8 tx_pause)
{
	struct mwifiex_ra_list_tbl *ra_list;
	u32 pkt_cnt = 0, tx_pkts_queued;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
				    list) {
			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
				continue;

			if (ra_list->tx_paused != tx_pause) {
				pkt_cnt += ra_list->total_pkt_count;
				ra_list->tx_paused = tx_pause;
				if (tx_pause)
					priv->wmm.pkts_paused[i] +=
						ra_list->total_pkt_count;
				else
					priv->wmm.pkts_paused[i] -=
						ra_list->total_pkt_count;
			}
		}
	}

	if (pkt_cnt) {
		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
		if (tx_pause)
			tx_pkts_queued -= pkt_cnt;
		else
			tx_pkts_queued += pkt_cnt;

		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function retrieves an RA list node for a given TID and
 * RA address pair.
 *
 * If no such node is found, a new node is added first and then
 * retrieved.
 */
struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
			    const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;

	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
	if (ra_list)
		return ra_list;
	mwifiex_ralist_add(priv, ra_addr);

	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
}

/*
 * This function deletes the RA list nodes for a given MAC address
 * across all TIDs. The function also decrements the TX pending count
 * accordingly.
 */
void
mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
{
	struct mwifiex_ra_list_tbl *ra_list;
	int i;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	for (i = 0; i < MAX_NUM_TID; ++i) {
		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);

		if (!ra_list)
			continue;
		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
		if (ra_list->tx_paused)
			priv->wmm.pkts_paused[i] -= ra_list->total_pkt_count;
		else
			atomic_sub(ra_list->total_pkt_count,
				   &priv->wmm.tx_pkts_queued);
		list_del(&ra_list->list);
		kfree(ra_list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function checks if a particular RA list node exists in a given TID
 * table index.
 */
int
mwifiex_is_ralist_valid(struct mwifiex_private *priv,
			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
{
	struct mwifiex_ra_list_tbl *rlist;

	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
			    list) {
		if (rlist == ra_list)
			return true;
	}

	return false;
}

/*
 * This function adds a packet to the bypass TX queue.
 * This is a special TX queue for packets that can be sent even when
 * port_open is false.
 */
void
mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
				   struct sk_buff *skb)
{
	skb_queue_tail(&priv->bypass_txq, skb);
}

/*
 * This function adds a packet to the WMM queue.
 *
 * In the disconnected state the packet is immediately dropped and the
 * packet send completion callback is called with failure status.
 *
 * Otherwise, the correct RA list node is located and the packet
 * is queued at the list tail.
 */
void
mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
			    struct sk_buff *skb)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	u32 tid;
	struct mwifiex_ra_list_tbl *ra_list;
	u8 ra[ETH_ALEN], tid_down;
	struct list_head list_head;
	int tdls_status = TDLS_NOT_SETUP;
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);

	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);

	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
			mwifiex_dbg(adapter, DATA,
				    "TDLS setup packet for %pM.\t"
				    "Don't block\n", ra);
		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
	}

	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	tid = skb->priority;

	spin_lock_bh(&priv->wmm.ra_list_spinlock);

	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);

	/* In the infrastructure case the list has already been created
	   during association, so there is no need to call get_queue_raptr;
	   there is only one RA pointer per TID in that case */
	if (!mwifiex_queuing_ra_based(priv) &&
	    !mwifiex_is_skb_mgmt_frame(skb)) {
		switch (tdls_status) {
		case TDLS_SETUP_COMPLETE:
		case TDLS_CHAN_SWITCHING:
		case TDLS_IN_BASE_CHAN:
		case TDLS_IN_OFF_CHAN:
			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
							      ra);
			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
			break;
		case TDLS_SETUP_INPROGRESS:
			skb_queue_tail(&priv->tdls_txq, skb);
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			return;
		default:
			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
			ra_list = list_first_entry_or_null(&list_head,
					struct mwifiex_ra_list_tbl, list);
			break;
		}
	} else {
		memcpy(ra, skb->data, ETH_ALEN);
		if (is_multicast_ether_addr(ra) || mwifiex_is_skb_mgmt_frame(skb))
			eth_broadcast_addr(ra);
		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
	}

	if (!ra_list) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_write_data_complete(adapter, skb, 0, -1);
		return;
	}

	skb_queue_tail(&ra_list->skb_head, skb);

	ra_list->ba_pkt_count++;
	ra_list->total_pkt_count++;

	if (atomic_read(&priv->wmm.highest_queued_prio) <
						priv->tos_to_tid_inv[tid_down])
		atomic_set(&priv->wmm.highest_queued_prio,
			   priv->tos_to_tid_inv[tid_down]);

	if (ra_list->tx_paused)
		priv->wmm.pkts_paused[tid_down]++;
	else
		atomic_inc(&priv->wmm.tx_pkts_queued);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}

/*
 * This function processes the get WMM status command response from firmware.
 *
 * The response may contain multiple TLVs -
 *      - AC Queue status TLVs
 *      - Current WMM Parameter IE TLV
 *      - Admission Control action frame TLVs
 *
 * This function parses the TLVs and then calls further specific functions
 * to process any changes in the queue priorities or state.
 */
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
			       const struct host_cmd_ds_command *resp)
{
	u8 *curr = (u8 *) &resp->params.get_wmm_status;
	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
	bool valid = true;

	struct mwifiex_ie_types_data *tlv_hdr;
	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
	struct mwifiex_wmm_ac_status *ac_status;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
		    resp_len);

	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
		tlv_len = le16_to_cpu(tlv_hdr->header.len);

		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
			break;

		switch (le16_to_cpu(tlv_hdr->header.type)) {
		case TLV_TYPE_WMMQSTATUS:
			tlv_wmm_qstatus =
				(struct mwifiex_ie_types_wmm_queue_status *)
				tlv_hdr;
			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "QSTATUS TLV: %d, %d, %d\n",
				    tlv_wmm_qstatus->queue_index,
				    tlv_wmm_qstatus->flow_required,
				    tlv_wmm_qstatus->disabled);

			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
							 queue_index];
			ac_status->disabled = tlv_wmm_qstatus->disabled;
			ac_status->flow_required =
						tlv_wmm_qstatus->flow_required;
			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			/*
			 * Point the regular IEEE IE 2 bytes into the Marvell IE
			 *   and set up the IEEE IE type and length byte fields
			 */

			wmm_param_ie =
				(struct ieee_types_wmm_parameter *) (curr +
								    2);
			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
			wmm_param_ie->vend_hdr.element_id =
						WLAN_EID_VENDOR_SPECIFIC;

			mwifiex_dbg(priv->adapter, CMD,
				    "info: CMD_RESP: WMM_GET_STATUS:\t"
				    "WMM Parameter Set Count: %d\n",
				    wmm_param_ie->qos_info_bitmap & mask);

			if (wmm_param_ie->vend_hdr.len + 2 >
				sizeof(struct ieee_types_wmm_parameter))
				break;

			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
			       wmm_ie, wmm_param_ie,
			       wmm_param_ie->vend_hdr.len + 2);

			break;

		default:
			valid = false;
			break;
		}

		curr += (tlv_len + sizeof(tlv_hdr->header));
		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
	}

	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
	mwifiex_wmm_setup_ac_downgrade(priv);

	return 0;
}

/*
 * Callback handler from the command module to allow insertion of a WMM TLV.
 *
 * If the BSS we are associating to supports WMM, this function adds the
 * required WMM Information IE to the association request command buffer in
 * the form of a Marvell extended IEEE IE.
 */
u32
mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
				    u8 **assoc_buf,
				    struct ieee_types_wmm_parameter *wmm_ie,
				    struct ieee80211_ht_cap *ht_cap)
{
	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
	u32 ret_len = 0;

	/* Null checks */
	if (!assoc_buf)
		return 0;
	if (!(*assoc_buf))
		return 0;

	if (!wmm_ie)
		return 0;

	mwifiex_dbg(priv->adapter, INFO,
		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
		    wmm_ie->vend_hdr.element_id);

	if ((priv->wmm_required ||
	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN))) &&
	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
		       le16_to_cpu(wmm_tlv->header.len));
		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
			memcpy((u8 *) (wmm_tlv->wmm_ie
				       + le16_to_cpu(wmm_tlv->header.len)
				       - sizeof(priv->wmm_qosinfo)),
			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));

		ret_len = sizeof(wmm_tlv->header)
			  + le16_to_cpu(wmm_tlv->header.len);

		*assoc_buf += ret_len;
	}

	return ret_len;
}

/*
 * This function computes the time delay in the driver queues for a
 * given packet.
 *
 * When the packet is received at the OS/Driver interface, the current
 * time is set in the packet structure. The difference between the present
 * time and that received time is computed in this function and limited
 * based on pre-compiled limits in the driver.
 */
u8
mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
				  const struct sk_buff *skb)
{
	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
	u8 ret_val;

	/*
	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
	 *  by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
	 *
	 * Pass max value if queue_delay is beyond the uint8 range
	 */
	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);

	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
		    "%d ms sent to FW\n", queue_delay, ret_val);

	return ret_val;
}
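
/*
 * Worked example: a packet that sat in the driver queues for 120 ms,
 * with drv_pkt_delay_max at its usual 510 ms cap, yields
 * min(120, 510) >> 1 == 60; the firmware reads that as 120 ms since the
 * field is carried in units of 2 ms.
 */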

/*
 * This function retrieves the highest priority RA list table pointer.
 */
static struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
				     struct mwifiex_private **priv, int *tid)
{
	struct mwifiex_private *priv_tmp;
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_tid_tbl *tid_ptr;
	atomic_t *hqp;
	int i, j;

	/* check the BSS with highest priority first */
	for (j = adapter->priv_num - 1; j >= 0; --j) {
		/* iterate over BSSes with equal priority */
		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
				    &adapter->bss_prio_tbl[j].bss_prio_head,
				    list) {

try_again:
			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;

			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
			     !priv_tmp->port_open) ||
			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
				continue;

			if (adapter->if_ops.is_port_ready &&
			    !adapter->if_ops.is_port_ready(priv_tmp))
				continue;

			/* iterate over the WMM queues of the BSS */
			hqp = &priv_tmp->wmm.highest_queued_prio;
			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {

				spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);

				tid_ptr = &(priv_tmp)->wmm.
					tid_tbl_ptr[tos_to_tid[i]];

				/* iterate over receiver addresses */
				list_for_each_entry(ptr, &tid_ptr->ra_list,
						    list) {

					if (!ptr->tx_paused &&
					    !skb_queue_empty(&ptr->skb_head))
						/* holds both locks */
						goto found;
				}

				spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
			}

			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   HIGH_PRIO_TID);
				/* Iterate current private once more, since
				 * there still exist packets in data queue
				 */
				goto try_again;
			} else
				atomic_set(&priv_tmp->wmm.highest_queued_prio,
					   NO_PKT_PRIO_TID);
		}
	}

	return NULL;

found:
	/* holds ra_list_spinlock */
	if (atomic_read(hqp) > i)
		atomic_set(hqp, i);
	spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);

	*priv = priv_tmp;
	*tid = tos_to_tid[i];

	return ptr;
}

/* This function rotates the RA and BSS lists so packets are picked in a
 * round-robin fashion.
 *
 * After a packet is successfully transmitted, rotate the RA list so that
 * the RA next to the one just served comes first in the list. This way
 * the RAs are picked in a round-robin fashion. The same applies to BSS
 * nodes of equal priority.
 *
 * The function also increments the wmm.packets_out counter.
 */
void mwifiex_rotate_priolists(struct mwifiex_private *priv,
				 struct mwifiex_ra_list_tbl *ra,
				 int tid)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];

	spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
	/*
	 * dirty trick: we remove 'head' temporarily and reinsert it after
	 * the curr bss node. imagine the list to stay fixed while the head
	 * is moved
	 */
	list_move(&tbl[priv->bss_priority].bss_prio_head,
		  &tbl[priv->bss_priority].bss_prio_cur->list);
	spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
		priv->wmm.packets_out[tid]++;
		/* same as above */
		list_move(&tid_ptr->ra_list, &ra->list);
	}
	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
}
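
/*
 * Example of the rotation trick: for an RA list head -> A -> B -> C
 * where B was just served, list_move() reinserts the head right after
 * B, so the next traversal starts at C and the RAs are picked in a
 * round-robin fashion.
 */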

/*
 * This function checks if 11n aggregation is possible.
 */
static int
mwifiex_is_11n_aggregation_possible(struct mwifiex_private *priv,
				    struct mwifiex_ra_list_tbl *ptr,
				    int max_buf_size)
{
	int count = 0, total_size = 0;
	struct sk_buff *skb, *tmp;
	int max_amsdu_size;

	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
	    ptr->is_11n_enabled)
		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
	else
		max_amsdu_size = max_buf_size;

	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
		total_size += skb->len;
		if (total_size >= max_amsdu_size)
			break;
		if (++count >= MIN_NUM_AMSDU)
			return true;
	}

	return false;
}
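
/*
 * Example (editor's sketch, assuming MIN_NUM_AMSDU == 2 as defined
 * elsewhere in the driver): with two 400-byte MSDUs queued against an
 * 8k A-MSDU limit, the walk accumulates 400 then 800 bytes, reaches
 * count == 2 and returns true; a single queued packet can never justify
 * starting an A-MSDU.
 */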

/*
 * This function sends a single packet to firmware for transmission.
 */
static void
mwifiex_send_single_packet(struct mwifiex_private *priv,
			   struct mwifiex_ra_list_tbl *ptr, int ptr_index)
			   __releases(&priv->wmm.ra_list_spinlock)
{
	struct sk_buff *skb, *skb_next;
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	mwifiex_dbg(adapter, DATA,
		    "data: dequeuing the packet %p %p\n", ptr, skb);

	ptr->total_pkt_count--;

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
				sizeof(struct txpd) : 0);

	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
		/* Queue the packet back on the RA list */
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		ptr->total_pkt_count++;
		ptr->ba_pkt_count++;
		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	} else {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
	}
}

/*
 * This function checks if the first packet in the given RA list
 * is already processed or not.
 */
static int
mwifiex_is_ptr_processed(struct mwifiex_private *priv,
			 struct mwifiex_ra_list_tbl *ptr)
{
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head))
		return false;

	skb = skb_peek(&ptr->skb_head);

	tx_info = MWIFIEX_SKB_TXCB(skb);
	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
		return true;

	return false;
}

/*
 * This function sends a single processed packet to firmware for
 * transmission.
 */
static void
mwifiex_send_processed_packet(struct mwifiex_private *priv,
			      struct mwifiex_ra_list_tbl *ptr, int ptr_index)
				__releases(&priv->wmm.ra_list_spinlock)
{
	struct mwifiex_tx_param tx_param;
	struct mwifiex_adapter *adapter = priv->adapter;
	int ret = -1;
	struct sk_buff *skb, *skb_next;
	struct mwifiex_txinfo *tx_info;

	if (skb_queue_empty(&ptr->skb_head)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return;
	}

	skb = skb_dequeue(&ptr->skb_head);

	if (adapter->data_sent || adapter->tx_lock_flag) {
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		skb_queue_tail(&adapter->tx_data_q, skb);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		atomic_inc(&adapter->tx_queued);
		return;
	}

	if (!skb_queue_empty(&ptr->skb_head))
		skb_next = skb_peek(&ptr->skb_head);
	else
		skb_next = NULL;

	tx_info = MWIFIEX_SKB_TXCB(skb);

	spin_unlock_bh(&priv->wmm.ra_list_spinlock);

	tx_param.next_pkt_len =
		((skb_next) ? skb_next->len +
		 sizeof(struct txpd) : 0);
	if (adapter->iface_type == MWIFIEX_USB) {
		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
						   skb, &tx_param);
	} else {
		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
						   skb, &tx_param);
	}

	switch (ret) {
	case -EBUSY:
		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
		spin_lock_bh(&priv->wmm.ra_list_spinlock);

		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
			mwifiex_write_data_complete(adapter, skb, 0, -1);
			return;
		}

		skb_queue_tail(&ptr->skb_head, skb);

		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		break;
	case -1:
		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
		adapter->dbg.num_tx_host_to_card_failure++;
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	case -EINPROGRESS:
		break;
	case 0:
		mwifiex_write_data_complete(adapter, skb, 0, ret);
		break;
	default:
		break;
	}
	if (ret != -EBUSY) {
		mwifiex_rotate_priolists(priv, ptr, ptr_index);
		atomic_dec(&priv->wmm.tx_pkts_queued);
		spin_lock_bh(&priv->wmm.ra_list_spinlock);
		ptr->total_pkt_count--;
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
	}
}

/*
 * This function dequeues a packet from the highest priority list
 * and transmits it.
 */
static int
mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
{
	struct mwifiex_ra_list_tbl *ptr;
	struct mwifiex_private *priv = NULL;
	int ptr_index = 0;
	u8 ra[ETH_ALEN];
	int tid_del = 0, tid = 0;

	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
	if (!ptr)
		return -1;

	tid = mwifiex_get_tid(ptr);

	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);

	spin_lock_bh(&priv->wmm.ra_list_spinlock);
	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
		return -1;
	}

	if (mwifiex_is_ptr_processed(priv, ptr)) {
		mwifiex_send_processed_packet(priv, ptr, ptr_index);
		/* ra_list_spinlock has been freed in
		   mwifiex_send_processed_packet() */
		return 0;
	}

	if (!ptr->is_11n_enabled ||
		ptr->ba_status ||
		priv->wps.session_enable) {
		if (ptr->is_11n_enabled &&
			ptr->ba_status &&
			ptr->amsdu_in_ampdu &&
			mwifiex_is_amsdu_allowed(priv, tid) &&
			mwifiex_is_11n_aggregation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_11n_aggregate_pkt()
			 */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			 * mwifiex_send_single_packet()
			 */
	} else {
		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_addba(priv, tid, ptr->ra);
			} else if (mwifiex_find_stream_to_delete
				   (priv, tid, &tid_del, ra)) {
				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
						      BA_SETUP_INPROGRESS);
				mwifiex_send_delba(priv, tid_del, ra, 1);
			}
		}
		if (mwifiex_is_amsdu_allowed(priv, tid) &&
		    mwifiex_is_11n_aggregation_possible(priv, ptr,
							adapter->tx_buf_size))
			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			   mwifiex_11n_aggregate_pkt() */
		else
			mwifiex_send_single_packet(priv, ptr, ptr_index);
			/* ra_list_spinlock has been freed in
			   mwifiex_send_single_packet() */
	}
	return 0;
}

void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
{
	struct mwifiex_tx_param tx_param;
	struct sk_buff *skb;
	struct mwifiex_txinfo *tx_info;
	struct mwifiex_private *priv;
	int i;

	if (adapter->data_sent || adapter->tx_lock_flag)
		return;

	for (i = 0; i < adapter->priv_num; ++i) {
		priv = adapter->priv[i];

		if (adapter->if_ops.is_port_ready &&
		    !adapter->if_ops.is_port_ready(priv))
			continue;

		if (skb_queue_empty(&priv->bypass_txq))
			continue;

		skb = skb_dequeue(&priv->bypass_txq);
		tx_info = MWIFIEX_SKB_TXCB(skb);

		/* no aggregation for bypass packets */
		tx_param.next_pkt_len = 0;

		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
			skb_queue_head(&priv->bypass_txq, skb);
			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
		} else {
			atomic_dec(&adapter->bypass_tx_pending);
		}
	}
}

/*
 * This function transmits the highest priority packet awaiting in the
 * WMM Queues.
 */
void
mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
{
	do {
		if (mwifiex_dequeue_tx_packet(adapter))
			break;
		if (adapter->iface_type != MWIFIEX_SDIO) {
			if (adapter->data_sent ||
			    adapter->tx_lock_flag)
				break;
		} else {
			if (atomic_read(&adapter->tx_queued) >=
			    MWIFIEX_MAX_PKTS_TXQ)
				break;
		}
	} while (!mwifiex_wmm_lists_empty(adapter));
}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * NXP Wireless LAN device driver: WMM
   4 *
   5 * Copyright 2011-2020 NXP
   6 */
   7
   8#include "decl.h"
   9#include "ioctl.h"
  10#include "util.h"
  11#include "fw.h"
  12#include "main.h"
  13#include "wmm.h"
  14#include "11n.h"
  15
  16
  17/* Maximum value FW can accept for driver delay in packet transmission */
  18#define DRV_PKT_DELAY_TO_FW_MAX   512
  19
  20
  21#define WMM_QUEUED_PACKET_LOWER_LIMIT   180
  22
  23#define WMM_QUEUED_PACKET_UPPER_LIMIT   200
  24
  25/* Offset for TOS field in the IP header */
  26#define IPTOS_OFFSET 5
  27
  28static bool disable_tx_amsdu;
  29module_param(disable_tx_amsdu, bool, 0644);
  30
  31/* This table inverses the tos_to_tid operation to get a priority
  32 * which is in sequential order, and can be compared.
  33 * Use this to compare the priority of two different TIDs.
  34 */
  35const u8 tos_to_tid_inv[] = {
  36	0x02,  /* from tos_to_tid[2] = 0 */
  37	0x00,  /* from tos_to_tid[0] = 1 */
  38	0x01,  /* from tos_to_tid[1] = 2 */
  39	0x03,
  40	0x04,
  41	0x05,
  42	0x06,
  43	0x07
  44};
  45
  46/* WMM information IE */
  47static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
  48	0x00, 0x50, 0xf2, 0x02,
  49	0x00, 0x01, 0x00
  50};
  51
  52static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
  53	WMM_AC_BK,
  54	WMM_AC_VI,
  55	WMM_AC_VO
  56};
  57
  58static u8 tos_to_tid[] = {
  59	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
  60	0x01,			/* 0 1 0 AC_BK */
  61	0x02,			/* 0 0 0 AC_BK */
  62	0x00,			/* 0 0 1 AC_BE */
  63	0x03,			/* 0 1 1 AC_BE */
  64	0x04,			/* 1 0 0 AC_VI */
  65	0x05,			/* 1 0 1 AC_VI */
  66	0x06,			/* 1 1 0 AC_VO */
  67	0x07			/* 1 1 1 AC_VO */
  68};
  69
  70static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
  71
  72/*
  73 * This function debug prints the priority parameters for a WMM AC.
  74 */
  75static void
  76mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
  77{
  78	const char *ac_str[] = { "BK", "BE", "VI", "VO" };
  79
  80	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
  81		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
  82		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
  83					     & MWIFIEX_ACI) >> 5]],
  84		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
  85		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
  86		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
  87		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
  88		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
  89		 le16_to_cpu(ac_param->tx_op_limit));
  90}
  91
  92/*
  93 * This function allocates a route address list.
  94 *
  95 * The function also initializes the list with the provided RA.
  96 */
  97static struct mwifiex_ra_list_tbl *
  98mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
  99{
 100	struct mwifiex_ra_list_tbl *ra_list;
 101
 102	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
 103	if (!ra_list)
 104		return NULL;
 105
 106	INIT_LIST_HEAD(&ra_list->list);
 107	skb_queue_head_init(&ra_list->skb_head);
 108
 109	memcpy(ra_list->ra, ra, ETH_ALEN);
 110
 111	ra_list->total_pkt_count = 0;
 112
 113	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);
 114
 115	return ra_list;
 116}
 117
 118/* This function returns random no between 16 and 32 to be used as threshold
 119 * for no of packets after which BA setup is initiated.
 120 */
 121static u8 mwifiex_get_random_ba_threshold(void)
 122{
 123	u64 ns;
 124	/* setup ba_packet_threshold here random number between
 125	 * [BA_SETUP_PACKET_OFFSET,
 126	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
 127	 */
 128	ns = ktime_get_ns();
 129	ns += (ns >> 32) + (ns >> 16);
 130
 131	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
 132}
 133
 134/*
 135 * This function allocates and adds a RA list for all TIDs
 136 * with the given RA.
 137 */
 138void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
 139{
 140	int i;
 141	struct mwifiex_ra_list_tbl *ra_list;
 142	struct mwifiex_adapter *adapter = priv->adapter;
 143	struct mwifiex_sta_node *node;
 144
 145
 146	for (i = 0; i < MAX_NUM_TID; ++i) {
 147		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
 148		mwifiex_dbg(adapter, INFO,
 149			    "info: created ra_list %p\n", ra_list);
 150
 151		if (!ra_list)
 152			break;
 153
 154		ra_list->is_11n_enabled = 0;
 155		ra_list->tdls_link = false;
 156		ra_list->ba_status = BA_SETUP_NONE;
 157		ra_list->amsdu_in_ampdu = false;
 158		if (!mwifiex_queuing_ra_based(priv)) {
 159			if (mwifiex_is_tdls_link_setup
 160				(mwifiex_get_tdls_link_status(priv, ra))) {
 161				ra_list->tdls_link = true;
 162				ra_list->is_11n_enabled =
 163					mwifiex_tdls_peer_11n_enabled(priv, ra);
 164			} else {
 165				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
 166			}
 167		} else {
 168			spin_lock_bh(&priv->sta_list_spinlock);
 169			node = mwifiex_get_sta_entry(priv, ra);
 170			if (node)
 171				ra_list->tx_paused = node->tx_pause;
 172			ra_list->is_11n_enabled =
 173				      mwifiex_is_sta_11n_enabled(priv, node);
 174			if (ra_list->is_11n_enabled)
 175				ra_list->max_amsdu = node->max_amsdu;
 176			spin_unlock_bh(&priv->sta_list_spinlock);
 177		}
 178
 179		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
 180			    ra_list, ra_list->is_11n_enabled);
 181
 182		if (ra_list->is_11n_enabled) {
 183			ra_list->ba_pkt_count = 0;
 184			ra_list->ba_packet_thr =
 185					      mwifiex_get_random_ba_threshold();
 186		}
 187		list_add_tail(&ra_list->list,
 188			      &priv->wmm.tid_tbl_ptr[i].ra_list);
 189	}
 190}
 191
 192/*
 193 * This function sets the WMM queue priorities to their default values.
 194 */
 195static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
 196{
 197	/* Default queue priorities: VO->VI->BE->BK */
 198	priv->wmm.queue_priority[0] = WMM_AC_VO;
 199	priv->wmm.queue_priority[1] = WMM_AC_VI;
 200	priv->wmm.queue_priority[2] = WMM_AC_BE;
 201	priv->wmm.queue_priority[3] = WMM_AC_BK;
 202}
 203
 204/*
 205 * This function map ACs to TIDs.
 206 */
 207static void
 208mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
 209{
 210	struct mwifiex_wmm_desc *wmm = &priv->wmm;
 211	u8 *queue_priority = wmm->queue_priority;
 212	int i;
 213
 214	for (i = 0; i < 4; ++i) {
 215		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
 216		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
 217	}
 218
 219	for (i = 0; i < MAX_NUM_TID; ++i)
 220		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
 221
 222	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
 223}
 224
 225/*
 226 * This function initializes WMM priority queues.
 227 */
 228void
 229mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
 230				   struct ieee_types_wmm_parameter *wmm_ie)
 231{
 232	u16 cw_min, avg_back_off, tmp[4];
 233	u32 i, j, num_ac;
 234	u8 ac_idx;
 235
 236	if (!wmm_ie || !priv->wmm_enabled) {
 237		/* WMM is not enabled, just set the defaults and return */
 238		mwifiex_wmm_default_queue_priorities(priv);
 239		return;
 240	}
 241
 242	mwifiex_dbg(priv->adapter, INFO,
 243		    "info: WMM Parameter IE: version=%d,\t"
 244		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
 245		    wmm_ie->version, wmm_ie->qos_info_bitmap &
 246		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
 247		    wmm_ie->reserved);
 248
 249	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
 250		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
 251		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
 252		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
 253		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);
 254
 255		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
 256		priv->wmm.queue_priority[ac_idx] = ac_idx;
 257		tmp[ac_idx] = avg_back_off;
 258
 259		mwifiex_dbg(priv->adapter, INFO,
 260			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
 261			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
 262			    cw_min, avg_back_off);
 263		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
 264	}
 265
 266	/* Bubble sort */
 267	for (i = 0; i < num_ac; i++) {
 268		for (j = 1; j < num_ac - i; j++) {
 269			if (tmp[j - 1] > tmp[j]) {
 270				swap(tmp[j - 1], tmp[j]);
 271				swap(priv->wmm.queue_priority[j - 1],
 272				     priv->wmm.queue_priority[j]);
 273			} else if (tmp[j - 1] == tmp[j]) {
 274				if (priv->wmm.queue_priority[j - 1]
 275				    < priv->wmm.queue_priority[j])
 276					swap(priv->wmm.queue_priority[j - 1],
 277					     priv->wmm.queue_priority[j]);
 278			}
 279		}
 280	}
 281
 282	mwifiex_wmm_queue_priorities_tid(priv);
 283}
 284
 285/*
 286 * This function evaluates whether or not an AC is to be downgraded.
 287 *
 288 * In case the AC is not enabled, the highest AC is returned that is
 289 * enabled and does not require admission control.
 290 */
 291static enum mwifiex_wmm_ac_e
 292mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
 293			      enum mwifiex_wmm_ac_e eval_ac)
 294{
 295	int down_ac;
 296	enum mwifiex_wmm_ac_e ret_ac;
 297	struct mwifiex_wmm_ac_status *ac_status;
 298
 299	ac_status = &priv->wmm.ac_status[eval_ac];
 300
 301	if (!ac_status->disabled)
 302		/* Okay to use this AC; it's enabled */
 303		return eval_ac;
 304
 305	/* Set up a default return value of the lowest priority */
 306	ret_ac = WMM_AC_BK;
 307
 308	/*
 309	 *  Find the highest AC that is enabled and does not require
 310	 *  admission control. The spec disallows downgrading to an AC
 311	 *  that is enabled only because admission control has completed.
 312	 *  Unadmitted traffic is not to be sent on an AC with admitted
 313	 *  traffic.
 314	 */
 315	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
 316		ac_status = &priv->wmm.ac_status[down_ac];
 317
 318		if (!ac_status->disabled && !ac_status->flow_required)
 319			/* AC is enabled and does not require admission
 320			   control */
 321			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
 322	}
 323
 324	return ret_ac;
 325}
 326
 327/*
 328 * This function sets up the AC downgrade mapping for each WMM queue.
 329 */
 330void
 331mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
 332{
 333	int ac_val;
 334
 335	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
 336		    "BK(0), BE(1), VI(2), VO(3)\n");
 337
 338	if (!priv->wmm_enabled) {
 339		/* WMM is not enabled, default priorities */
 340		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
 341			priv->wmm.ac_down_graded_vals[ac_val] =
 342						(enum mwifiex_wmm_ac_e) ac_val;
 343	} else {
 344		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
 345			priv->wmm.ac_down_graded_vals[ac_val]
 346				= mwifiex_wmm_eval_downgrade_ac(priv,
 347						(enum mwifiex_wmm_ac_e) ac_val);
 348			mwifiex_dbg(priv->adapter, INFO,
 349				    "info: WMM: AC PRIO %d maps to %d\n",
 350				    ac_val,
 351				    priv->wmm.ac_down_graded_vals[ac_val]);
 352		}
 353	}
 354}
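
/*
 * Worked example (illustrative): if firmware reports AC_VI and AC_VO as
 * disabled (e.g. ACM is set and no stream was admitted) while AC_BK and
 * AC_BE stay enabled without admission control, the loop above yields
 *
 *	ac_down_graded_vals[] = { BK, BE, BE, BE }
 *
 * so video and voice traffic are sent on AC_BE.
 */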
 355
 356/*
 357 * This function converts the IP TOS field to a WMM AC
 358 * Queue assignment.
 359 */
 360static enum mwifiex_wmm_ac_e
 361mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
 362{
 363	/* Map of TOS UP values to WMM AC */
 364	static const enum mwifiex_wmm_ac_e tos_to_ac[] = {
 365		WMM_AC_BE,
 366		WMM_AC_BK,
 367		WMM_AC_BK,
 368		WMM_AC_BE,
 369		WMM_AC_VI,
 370		WMM_AC_VI,
 371		WMM_AC_VO,
 372		WMM_AC_VO
 373	};
 374
 375	if (tos >= ARRAY_SIZE(tos_to_ac))
 376		return WMM_AC_BE;
 377
 378	return tos_to_ac[tos];
 379}
 380
 381/*
 382 * This function evaluates a given TID and downgrades it to a lower
 383 * TID if the WMM Parameter IE received from the AP indicates that the
 384 * AC is disabled (due to the call admission control (ACM) bit). Mapping
 385 * of TID to AC is taken care of internally.
 386 */
 387u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
 388{
 389	enum mwifiex_wmm_ac_e ac, ac_down;
 390	u8 new_tid;
 391
 392	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
 393	ac_down = priv->wmm.ac_down_graded_vals[ac];
 394
 395	/* Return an index into the tid array; picking from the array
 396	 * is taken care of by the dequeuing function
 397	 */
 398	new_tid = ac_to_tid[ac_down][tid % 2];
 399
 400	return new_tid;
 401}
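
/*
 * Worked example (illustrative): with AC_VO downgraded to AC_BE, a
 * packet with skb->priority 6 (voice) maps through
 *
 *	ac      = WMM_AC_VO
 *	ac_down = WMM_AC_BE
 *	new_tid = ac_to_tid[WMM_AC_BE][6 % 2]
 *
 * i.e. the even-index TID of the downgraded AC is picked for an even
 * input priority.
 */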
 402
 403/*
 404 * This function initializes the WMM state information and the
 405 * WMM data path queues.
 406 */
 407void
 408mwifiex_wmm_init(struct mwifiex_adapter *adapter)
 409{
 410	int i, j;
 411	struct mwifiex_private *priv;
 412
 413	for (j = 0; j < adapter->priv_num; ++j) {
 414		priv = adapter->priv[j];
 415		if (!priv)
 416			continue;
 417
 418		for (i = 0; i < MAX_NUM_TID; ++i) {
 419			if (!disable_tx_amsdu &&
 420			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
 421				priv->aggr_prio_tbl[i].amsdu =
 422							priv->tos_to_tid_inv[i];
 423			else
 424				priv->aggr_prio_tbl[i].amsdu =
 425							BA_STREAM_NOT_ALLOWED;
 426			priv->aggr_prio_tbl[i].ampdu_ap =
 427							priv->tos_to_tid_inv[i];
 428			priv->aggr_prio_tbl[i].ampdu_user =
 429							priv->tos_to_tid_inv[i];
 430		}
 431
 432		priv->aggr_prio_tbl[6].amsdu
 433					= priv->aggr_prio_tbl[6].ampdu_ap
 434					= priv->aggr_prio_tbl[6].ampdu_user
 435					= BA_STREAM_NOT_ALLOWED;
 436
 437		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
 438					= priv->aggr_prio_tbl[7].ampdu_user
 439					= BA_STREAM_NOT_ALLOWED;
 440
 441		mwifiex_set_ba_params(priv);
 442		mwifiex_reset_11n_rx_seq_num(priv);
 443
 444		priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
 445		atomic_set(&priv->wmm.tx_pkts_queued, 0);
 446		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
 447	}
 448}
 449
 450int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
 451{
 452	struct mwifiex_private *priv;
 453	int i;
 454
 455	for (i = 0; i < adapter->priv_num; i++) {
 456		priv = adapter->priv[i];
 457		if (!priv)
 458			continue;
 459		if (adapter->if_ops.is_port_ready &&
 460		    !adapter->if_ops.is_port_ready(priv))
 461			continue;
 462		if (!skb_queue_empty(&priv->bypass_txq))
 463			return false;
 464	}
 465
 466	return true;
 467}
 468
 469/*
 470 * This function checks if WMM Tx queue is empty.
 471 */
 472int
 473mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
 474{
 475	int i;
 476	struct mwifiex_private *priv;
 477
 478	for (i = 0; i < adapter->priv_num; ++i) {
 479		priv = adapter->priv[i];
 480		if (!priv)
 481			continue;
 482		if (!priv->port_open &&
 483		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
 484			continue;
 485		if (adapter->if_ops.is_port_ready &&
 486		    !adapter->if_ops.is_port_ready(priv))
 487			continue;
 488		if (atomic_read(&priv->wmm.tx_pkts_queued))
 489			return false;
 490	}
 491
 492	return true;
 493}
 494
 495/*
 496 * This function deletes all packets in an RA list node.
 497 *
 498 * The packet send completion callback handlers are called with
 499 * status failure after the packets are dequeued, to ensure proper
 500 * cleanup. The RA list node itself is not freed here.
 501 */
 502static void
 503mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
 504				    struct mwifiex_ra_list_tbl *ra_list)
 505{
 506	struct mwifiex_adapter *adapter = priv->adapter;
 507	struct sk_buff *skb, *tmp;
 508
 509	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
 510		skb_unlink(skb, &ra_list->skb_head);
 511		mwifiex_write_data_complete(adapter, skb, 0, -1);
 512	}
 513}
 514
 515/*
 516 * This function deletes all packets in an RA list.
 517 *
 518 * The packets queued in each node of the RA list are deleted; the
 519 * nodes and the list head itself are freed separately.
 520 */
 521static void
 522mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
 523			       struct list_head *ra_list_head)
 524{
 525	struct mwifiex_ra_list_tbl *ra_list;
 526
 527	list_for_each_entry(ra_list, ra_list_head, list)
 528		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
 529}
 530
 531/*
 532 * This function deletes all packets in all RA lists.
 533 */
 534static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
 535{
 536	int i;
 537
 538	for (i = 0; i < MAX_NUM_TID; i++)
 539		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
 540								       ra_list);
 541
 542	atomic_set(&priv->wmm.tx_pkts_queued, 0);
 543	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
 544}
 545
 546/*
 547 * This function deletes all route addresses from all RA lists.
 548 */
 549static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
 550{
 551	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
 552	int i;
 553
 554	for (i = 0; i < MAX_NUM_TID; ++i) {
 555		mwifiex_dbg(priv->adapter, INFO,
 556			    "info: ra_list: freeing buf for tid %d\n", i);
 557		list_for_each_entry_safe(ra_list, tmp_node,
 558					 &priv->wmm.tid_tbl_ptr[i].ra_list,
 559					 list) {
 560			list_del(&ra_list->list);
 561			kfree(ra_list);
 562		}
 563
 564		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
 565	}
 566}
 567
 568static int mwifiex_free_ack_frame(int id, void *p, void *data)
 569{
 570	pr_warn("Have pending ack frames!\n");
 571	kfree_skb(p);
 572	return 0;
 573}
 574
 575/*
 576 * This function cleans up the Tx and Rx queues.
 577 *
 578 * Cleanup includes -
 579 *      - All packets in RA lists
 580 *      - All entries in Rx reorder table
 581 *      - All entries in Tx BA stream table
 582 *      - MPA buffer (if required)
 583 *      - All RA lists
 584 */
 585void
 586mwifiex_clean_txrx(struct mwifiex_private *priv)
 587{
 588	struct sk_buff *skb, *tmp;
 589
 590	mwifiex_11n_cleanup_reorder_tbl(priv);
 591	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 592
 593	mwifiex_wmm_cleanup_queues(priv);
 594	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
 595
 596	if (priv->adapter->if_ops.cleanup_mpa_buf)
 597		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);
 598
 599	mwifiex_wmm_delete_all_ralist(priv);
 600	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
 601
 602	if (priv->adapter->if_ops.clean_pcie_ring &&
 603	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
 604		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
 605	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 606
 607	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
 608		skb_unlink(skb, &priv->tdls_txq);
 609		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
 610	}
 611
 612	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
 613		skb_unlink(skb, &priv->bypass_txq);
 614		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
 615	}
 616	atomic_set(&priv->adapter->bypass_tx_pending, 0);
 617
 618	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
 619	idr_destroy(&priv->ack_status_frames);
 620}
 621
 622/*
 623 * This function retrieves a particular RA list node, matching with the
 624 * given TID and RA address.
 625 */
 626struct mwifiex_ra_list_tbl *
 627mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
 628			    const u8 *ra_addr)
 629{
 630	struct mwifiex_ra_list_tbl *ra_list;
 631
 632	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
 633			    list) {
 634		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
 635			return ra_list;
 636	}
 637
 638	return NULL;
 639}
 640
 641void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
 642				    u8 tx_pause)
 643{
 644	struct mwifiex_ra_list_tbl *ra_list;
 645	u32 pkt_cnt = 0, tx_pkts_queued;
 646	int i;
 647
 648	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 649
 650	for (i = 0; i < MAX_NUM_TID; ++i) {
 651		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
 652		if (ra_list && ra_list->tx_paused != tx_pause) {
 653			pkt_cnt += ra_list->total_pkt_count;
 654			ra_list->tx_paused = tx_pause;
 655			if (tx_pause)
 656				priv->wmm.pkts_paused[i] +=
 657					ra_list->total_pkt_count;
 658			else
 659				priv->wmm.pkts_paused[i] -=
 660					ra_list->total_pkt_count;
 661		}
 662	}
 663
 664	if (pkt_cnt) {
 665		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
 666		if (tx_pause)
 667			tx_pkts_queued -= pkt_cnt;
 668		else
 669			tx_pkts_queued += pkt_cnt;
 670
 671		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
 672		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
 673	}
 674	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 675}
 676
 677/* This function updates the tx_pause state of non-TDLS peer RA lists
 678 * while TDLS channel switching is in progress
 679 */
 680void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
 681					       u8 *mac, u8 tx_pause)
 682{
 683	struct mwifiex_ra_list_tbl *ra_list;
 684	u32 pkt_cnt = 0, tx_pkts_queued;
 685	int i;
 686
 687	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 688
 689	for (i = 0; i < MAX_NUM_TID; ++i) {
 690		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
 691				    list) {
 692			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
 693				continue;
 694
 695			if (ra_list->tx_paused != tx_pause) {
 696				pkt_cnt += ra_list->total_pkt_count;
 697				ra_list->tx_paused = tx_pause;
 698				if (tx_pause)
 699					priv->wmm.pkts_paused[i] +=
 700						ra_list->total_pkt_count;
 701				else
 702					priv->wmm.pkts_paused[i] -=
 703						ra_list->total_pkt_count;
 704			}
 705		}
 706	}
 707
 708	if (pkt_cnt) {
 709		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
 710		if (tx_pause)
 711			tx_pkts_queued -= pkt_cnt;
 712		else
 713			tx_pkts_queued += pkt_cnt;
 714
 715		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
 716		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
 717	}
 718	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 719}
 720
 721/*
 722 * This function retrieves an RA list node for a given TID and
 723 * RA address pair.
 724 *
 725 * If no such node is found, a new node is added first and then
 726 * retrieved.
 727 */
 728struct mwifiex_ra_list_tbl *
 729mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
 730			    const u8 *ra_addr)
 731{
 732	struct mwifiex_ra_list_tbl *ra_list;
 733
 734	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
 735	if (ra_list)
 736		return ra_list;
 737	mwifiex_ralist_add(priv, ra_addr);
 738
 739	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
 740}
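
/*
 * Usage sketch (illustrative): callers rely on the find-or-create
 * behaviour and only need to handle allocation failure, e.g.
 *
 *	ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
 *	if (!ra_list)
 *		return;	(only possible if mwifiex_ralist_add() failed)
 *
 * as mwifiex_wmm_add_buf_txqueue() below does under ra_list_spinlock.
 */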
 741
 742/*
 743 * This function deletes the RA list nodes for a given MAC address on
 744 * all TIDs. It also decrements the TX pending count accordingly.
 745 */
 746void
 747mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
 748{
 749	struct mwifiex_ra_list_tbl *ra_list;
 750	int i;
 751
 752	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 753
 754	for (i = 0; i < MAX_NUM_TID; ++i) {
 755		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
 756
 757		if (!ra_list)
 758			continue;
 759		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
 760		if (ra_list->tx_paused)
 761			priv->wmm.pkts_paused[i] -= ra_list->total_pkt_count;
 762		else
 763			atomic_sub(ra_list->total_pkt_count,
 764				   &priv->wmm.tx_pkts_queued);
 765		list_del(&ra_list->list);
 766		kfree(ra_list);
 767	}
 768	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 769}
 770
 771/*
 772 * This function checks if a particular RA list node exists in a given TID
 773 * table index.
 774 */
 775int
 776mwifiex_is_ralist_valid(struct mwifiex_private *priv,
 777			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
 778{
 779	struct mwifiex_ra_list_tbl *rlist;
 780
 781	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
 782			    list) {
 783		if (rlist == ra_list)
 784			return true;
 785	}
 786
 787	return false;
 788}
 789
 790/*
 791 * This function adds a packet to the bypass TX queue.
 792 * This is a special TX queue for packets that can be sent even when
 793 * port_open is false.
 794 */
 795void
 796mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
 797				   struct sk_buff *skb)
 798{
 799	skb_queue_tail(&priv->bypass_txq, skb);
 800}
 801
 802/*
 803 * This function adds a packet to WMM queue.
 804 *
 805 * In the disconnected state the packet is immediately dropped and the
 806 * packet send completion callback is called with status failure.
 807 *
 808 * Otherwise, the correct RA list node is located and the packet
 809 * is queued at the list tail.
 810 */
 811void
 812mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
 813			    struct sk_buff *skb)
 814{
 815	struct mwifiex_adapter *adapter = priv->adapter;
 816	u32 tid;
 817	struct mwifiex_ra_list_tbl *ra_list;
 818	u8 ra[ETH_ALEN], tid_down;
 819	struct list_head list_head;
 820	int tdls_status = TDLS_NOT_SETUP;
 821	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
 822	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
 823
 824	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
 825
 826	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
 827	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
 828		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
 829			mwifiex_dbg(adapter, DATA,
 830				    "TDLS setup packet for %pM.\t"
 831				    "Don't block\n", ra);
 832		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
 833			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
 834	}
 835
 836	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
 837		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
 838		mwifiex_write_data_complete(adapter, skb, 0, -1);
 839		return;
 840	}
 841
 842	tid = skb->priority;
 843
 844	spin_lock_bh(&priv->wmm.ra_list_spinlock);
 845
 846	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
 847
 848	/* In infrastructure mode the list was already created during
 849	   association, so get_queue_raptr need not be called; there is
 850	   only one RA pointer per TID in that case */
 851	if (!mwifiex_queuing_ra_based(priv) &&
 852	    !mwifiex_is_skb_mgmt_frame(skb)) {
 853		switch (tdls_status) {
 854		case TDLS_SETUP_COMPLETE:
 855		case TDLS_CHAN_SWITCHING:
 856		case TDLS_IN_BASE_CHAN:
 857		case TDLS_IN_OFF_CHAN:
 858			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
 859							      ra);
 860			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
 861			break;
 862		case TDLS_SETUP_INPROGRESS:
 863			skb_queue_tail(&priv->tdls_txq, skb);
 864			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 865			return;
 866		default:
 867			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
 868			ra_list = list_first_entry_or_null(&list_head,
 869					struct mwifiex_ra_list_tbl, list);
 870			break;
 871		}
 872	} else {
 873		memcpy(ra, skb->data, ETH_ALEN);
 874		if (is_multicast_ether_addr(ra) || mwifiex_is_skb_mgmt_frame(skb))
 875			eth_broadcast_addr(ra);
 876		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
 877	}
 878
 879	if (!ra_list) {
 880		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 881		mwifiex_write_data_complete(adapter, skb, 0, -1);
 882		return;
 883	}
 884
 885	skb_queue_tail(&ra_list->skb_head, skb);
 886
 887	ra_list->ba_pkt_count++;
 888	ra_list->total_pkt_count++;
 889
 890	if (atomic_read(&priv->wmm.highest_queued_prio) <
 891						priv->tos_to_tid_inv[tid_down])
 892		atomic_set(&priv->wmm.highest_queued_prio,
 893			   priv->tos_to_tid_inv[tid_down]);
 894
 895	if (ra_list->tx_paused)
 896		priv->wmm.pkts_paused[tid_down]++;
 897	else
 898		atomic_inc(&priv->wmm.tx_pkts_queued);
 899
 900	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
 901}
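
/*
 * Worked example (illustrative): assuming the setup above left
 * tos_to_tid[7] == 7, queueing a voice packet with tid_down = 7 while
 * highest_queued_prio is 2 raises it to priv->tos_to_tid_inv[7], so the
 * next dequeue scan starts from the voice TIDs instead of TID 2.
 */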
 902
 903/*
 904 * This function processes the get WMM status command response from firmware.
 905 *
 906 * The response may contain multiple TLVs -
 907 *      - AC Queue status TLVs
 908 *      - Current WMM Parameter IE TLV
 909 *      - Admission Control action frame TLVs
 910 *
 911 * This function parses the TLVs and then calls further specific functions
 912 * to process any changes in the queue priorities or state.
 913 */
 914int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
 915			       const struct host_cmd_ds_command *resp)
 916{
 917	u8 *curr = (u8 *) &resp->params.get_wmm_status;
 918	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
 919	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
 920	bool valid = true;
 921
 922	struct mwifiex_ie_types_data *tlv_hdr;
 923	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
 924	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
 925	struct mwifiex_wmm_ac_status *ac_status;
 926
 927	mwifiex_dbg(priv->adapter, INFO,
 928		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
 929		    resp_len);
 930
 931	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
 932		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
 933		tlv_len = le16_to_cpu(tlv_hdr->header.len);
 934
 935		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
 936			break;
 937
 938		switch (le16_to_cpu(tlv_hdr->header.type)) {
 939		case TLV_TYPE_WMMQSTATUS:
 940			tlv_wmm_qstatus =
 941				(struct mwifiex_ie_types_wmm_queue_status *)
 942				tlv_hdr;
 943			mwifiex_dbg(priv->adapter, CMD,
 944				    "info: CMD_RESP: WMM_GET_STATUS:\t"
 945				    "QSTATUS TLV: %d, %d, %d\n",
 946				    tlv_wmm_qstatus->queue_index,
 947				    tlv_wmm_qstatus->flow_required,
 948				    tlv_wmm_qstatus->disabled);
 949
 950			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
 951							 queue_index];
 952			ac_status->disabled = tlv_wmm_qstatus->disabled;
 953			ac_status->flow_required =
 954						tlv_wmm_qstatus->flow_required;
 955			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
 956			break;
 957
 958		case WLAN_EID_VENDOR_SPECIFIC:
 959			/*
 960			 * Point the regular IEEE IE 2 bytes into the Marvell IE
 961			 *   and set up the IEEE IE type and length byte fields
 962			 */
 963
 964			wmm_param_ie =
 965				(struct ieee_types_wmm_parameter *) (curr +
 966								    2);
 967			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
 968			wmm_param_ie->vend_hdr.element_id =
 969						WLAN_EID_VENDOR_SPECIFIC;
 970
 971			mwifiex_dbg(priv->adapter, CMD,
 972				    "info: CMD_RESP: WMM_GET_STATUS:\t"
 973				    "WMM Parameter Set Count: %d\n",
 974				    wmm_param_ie->qos_info_bitmap & mask);
 975
 976			if (wmm_param_ie->vend_hdr.len + 2 >
 977				sizeof(struct ieee_types_wmm_parameter))
 978				break;
 979
 980			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
 981			       wmm_ie, wmm_param_ie,
 982			       wmm_param_ie->vend_hdr.len + 2);
 983
 984			break;
 985
 986		default:
 987			valid = false;
 988			break;
 989		}
 990
 991		curr += (tlv_len + sizeof(tlv_hdr->header));
 992		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
 993	}
 994
 995	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
 996	mwifiex_wmm_setup_ac_downgrade(priv);
 997
 998	return 0;
 999}
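
/*
 * TLV walk summary (illustrative): each loop iteration above consumes
 * one header-plus-payload block,
 *
 *	curr     += sizeof(tlv_hdr->header) + tlv_len;
 *	resp_len -= sizeof(tlv_hdr->header) + tlv_len;
 *
 * and the walk stops at the first unknown type or truncated TLV.
 */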
1000
1001/*
1002 * Callback handler from the command module to allow insertion of a WMM TLV.
1003 *
1004 * If the BSS we are associating to supports WMM, this function adds the
1005 * required WMM Information IE to the association request command buffer in
1006 * the form of a Marvell extended IEEE IE.
1007 */
1008u32
1009mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
1010				    u8 **assoc_buf,
1011				    struct ieee_types_wmm_parameter *wmm_ie,
1012				    struct ieee80211_ht_cap *ht_cap)
1013{
1014	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
1015	u32 ret_len = 0;
1016
1017	/* Null checks */
1018	if (!assoc_buf)
1019		return 0;
1020	if (!(*assoc_buf))
1021		return 0;
1022
1023	if (!wmm_ie)
1024		return 0;
1025
1026	mwifiex_dbg(priv->adapter, INFO,
1027		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
1028		    wmm_ie->vend_hdr.element_id);
1029
1030	if ((priv->wmm_required ||
1031	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
1032	     priv->adapter->config_bands & BAND_AN))) &&
1033	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
1034		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
1035		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
1036		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
1037		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
1038		       le16_to_cpu(wmm_tlv->header.len));
1039		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
1040			memcpy((u8 *) (wmm_tlv->wmm_ie
1041				       + le16_to_cpu(wmm_tlv->header.len)
1042				       - sizeof(priv->wmm_qosinfo)),
1043			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));
1044
1045		ret_len = sizeof(wmm_tlv->header)
1046			  + le16_to_cpu(wmm_tlv->header.len);
1047
1048		*assoc_buf += ret_len;
1049	}
1050
1051	return ret_len;
1052}
1053
1054/*
1055 * This function computes the time delay in the driver queues for a
1056 * given packet.
1057 *
1058 * When the packet is received at the OS/Driver interface, the current
1059 * time is set in the packet structure. The difference between the present
1060 * time and that received time is computed in this function and limited
1061 * based on pre-compiled limits in the driver.
1062 */
1063u8
1064mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
1065				  const struct sk_buff *skb)
1066{
1067	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
1068	u8 ret_val;
1069
1070	/*
1071	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
1072	 *  by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
1073	 *
1074	 * Pass max value if queue_delay is beyond the uint8 range
1075	 */
1076	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
1077
1078	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
1079		    "%d ms sent to FW\n", queue_delay, ret_val);
1080
1081	return ret_val;
1082}
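
/*
 * Worked example (illustrative): a packet that waited 300 ms in the
 * driver queues, with drv_pkt_delay_max at least that large, is
 * reported as
 *
 *	min(300, drv_pkt_delay_max) >> 1 = 150
 *
 * i.e. 150 units of 2 ms; longer delays are clamped to the configured
 * maximum before the shift.
 */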
1083
1084/*
1085 * This function retrieves the highest priority RA list table pointer.
1086 */
1087static struct mwifiex_ra_list_tbl *
1088mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
1089				     struct mwifiex_private **priv, int *tid)
1090{
1091	struct mwifiex_private *priv_tmp;
1092	struct mwifiex_ra_list_tbl *ptr;
1093	struct mwifiex_tid_tbl *tid_ptr;
1094	atomic_t *hqp;
1095	int i, j;
1096
1097	/* check the BSS with highest priority first */
1098	for (j = adapter->priv_num - 1; j >= 0; --j) {
1099		/* iterate over BSS with the equal priority */
1100		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
1101				    &adapter->bss_prio_tbl[j].bss_prio_head,
1102				    list) {
1103
1104try_again:
1105			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
1106
1107			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
1108			     !priv_tmp->port_open) ||
1109			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
1110				continue;
1111
1112			if (adapter->if_ops.is_port_ready &&
1113			    !adapter->if_ops.is_port_ready(priv_tmp))
1114				continue;
1115
1116			/* iterate over the WMM queues of the BSS */
1117			hqp = &priv_tmp->wmm.highest_queued_prio;
1118			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
1119
1120				spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);
1121
1122				tid_ptr = &(priv_tmp)->wmm.
1123					tid_tbl_ptr[tos_to_tid[i]];
1124
1125				/* iterate over receiver addresses */
1126				list_for_each_entry(ptr, &tid_ptr->ra_list,
1127						    list) {
1128
1129					if (!ptr->tx_paused &&
1130					    !skb_queue_empty(&ptr->skb_head))
1131						/* holds both locks */
1132						goto found;
1133				}
1134
1135				spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
1136			}
1137
1138			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
1139				atomic_set(&priv_tmp->wmm.highest_queued_prio,
1140					   HIGH_PRIO_TID);
1141				/* Iterate over the current private once more,
1142				 * since packets still exist in its data queue
1143				 */
1144				goto try_again;
1145			} else
1146				atomic_set(&priv_tmp->wmm.highest_queued_prio,
1147					   NO_PKT_PRIO_TID);
1148		}
1149	}
1150
1151	return NULL;
1152
1153found:
1154	/* holds ra_list_spinlock */
1155	if (atomic_read(hqp) > i)
1156		atomic_set(hqp, i);
1157	spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
1158
1159	*priv = priv_tmp;
1160	*tid = tos_to_tid[i];
1161
1162	return ptr;
1163}
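
/*
 * Scan order summary (illustrative): the walk above visits the highest
 * bss_prio_tbl index first, then TIDs from highest_queued_prio down to
 * LOW_PRIO_TID (translated through tos_to_tid[]), then RA entries in
 * list order; the first non-paused RA list with queued skbs wins, and
 * highest_queued_prio is lowered to the level actually found.
 */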
1164
1165/* This function rotates the RA and BSS lists so packets are picked round robin.
1166 *
1167 * After a packet is successfully transmitted, rotate the RA list so that the
1168 * RA next to the one just served comes first in the list. This way the RAs
1169 * are picked in a round-robin fashion. The same applies to BSS nodes of
1170 * equal priority.
1171 *
1172 * The function also increments the wmm.packets_out counter.
1173 */
1174void mwifiex_rotate_priolists(struct mwifiex_private *priv,
1175				 struct mwifiex_ra_list_tbl *ra,
1176				 int tid)
1177{
1178	struct mwifiex_adapter *adapter = priv->adapter;
1179	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
1180	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
1181
1182	spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
1183	/*
1184	 * dirty trick: remove 'head' temporarily and reinsert it after the
1185	 * current BSS node; imagine the list staying fixed while the head moves
1186	 */
1187	list_move(&tbl[priv->bss_priority].bss_prio_head,
1188		  &tbl[priv->bss_priority].bss_prio_cur->list);
1189	spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
1190
1191	spin_lock_bh(&priv->wmm.ra_list_spinlock);
1192	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
1193		priv->wmm.packets_out[tid]++;
1194		/* same as above */
1195		list_move(&tid_ptr->ra_list, &ra->list);
1196	}
1197	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1198}
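
/*
 * Minimal sketch (not driver code) of the head-move rotation used
 * above: list_move() deletes @head from wherever it sits and re-inserts
 * it right after @curr, so iteration starting at @head now begins with
 * the node that follows @curr.
 */
static void __maybe_unused
mwifiex_example_rotate(struct list_head *head, struct list_head *curr)
{
	list_move(head, curr);
}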
1199
1200/*
1201 * This function checks if 11n aggregation is possible.
1202 */
1203static int
1204mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
1205				    struct mwifiex_ra_list_tbl *ptr,
1206				    int max_buf_size)
1207{
1208	int count = 0, total_size = 0;
1209	struct sk_buff *skb, *tmp;
1210	int max_amsdu_size;
1211
1212	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
1213	    ptr->is_11n_enabled)
1214		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
1215	else
1216		max_amsdu_size = max_buf_size;
1217
1218	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
1219		total_size += skb->len;
1220		if (total_size >= max_amsdu_size)
1221			break;
1222		if (++count >= MIN_NUM_AMSDU)
1223			return true;
1224	}
1225
1226	return false;
1227}
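
/*
 * Worked example (illustrative, assuming MIN_NUM_AMSDU == 2): two
 * queued 400-byte frames against a 4096-byte A-MSDU limit accumulate
 * total_size = 800 without hitting the limit, so count reaches
 * MIN_NUM_AMSDU and the function returns true; a single queued frame
 * can never qualify.
 */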
1228
1229/*
1230 * This function sends a single packet to firmware for transmission.
1231 */
1232static void
1233mwifiex_send_single_packet(struct mwifiex_private *priv,
1234			   struct mwifiex_ra_list_tbl *ptr, int ptr_index)
1235			   __releases(&priv->wmm.ra_list_spinlock)
1236{
1237	struct sk_buff *skb, *skb_next;
1238	struct mwifiex_tx_param tx_param;
1239	struct mwifiex_adapter *adapter = priv->adapter;
1240	struct mwifiex_txinfo *tx_info;
1241
1242	if (skb_queue_empty(&ptr->skb_head)) {
1243		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1244		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
1245		return;
1246	}
1247
1248	skb = skb_dequeue(&ptr->skb_head);
1249
1250	tx_info = MWIFIEX_SKB_TXCB(skb);
1251	mwifiex_dbg(adapter, DATA,
1252		    "data: dequeuing the packet %p %p\n", ptr, skb);
1253
1254	ptr->total_pkt_count--;
1255
1256	if (!skb_queue_empty(&ptr->skb_head))
1257		skb_next = skb_peek(&ptr->skb_head);
1258	else
1259		skb_next = NULL;
1260
1261	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1262
1263	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
1264				sizeof(struct txpd) : 0);
1265
1266	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
1267		/* Queue the packet back at the head */
1268		spin_lock_bh(&priv->wmm.ra_list_spinlock);
1269
1270		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1271			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1272			mwifiex_write_data_complete(adapter, skb, 0, -1);
1273			return;
1274		}
1275
1276		skb_queue_tail(&ptr->skb_head, skb);
1277
1278		ptr->total_pkt_count++;
1279		ptr->ba_pkt_count++;
1280		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1281		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1282	} else {
1283		mwifiex_rotate_priolists(priv, ptr, ptr_index);
1284		atomic_dec(&priv->wmm.tx_pkts_queued);
1285	}
1286}
1287
1288/*
1289 * This function checks if the first packet in the given RA list
1290 * is already processed or not.
1291 */
1292static int
1293mwifiex_is_ptr_processed(struct mwifiex_private *priv,
1294			 struct mwifiex_ra_list_tbl *ptr)
1295{
1296	struct sk_buff *skb;
1297	struct mwifiex_txinfo *tx_info;
1298
1299	if (skb_queue_empty(&ptr->skb_head))
1300		return false;
1301
1302	skb = skb_peek(&ptr->skb_head);
1303
1304	tx_info = MWIFIEX_SKB_TXCB(skb);
1305	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
1306		return true;
1307
1308	return false;
1309}
1310
1311/*
1312 * This function sends a single processed packet to firmware for
1313 * transmission.
1314 */
1315static void
1316mwifiex_send_processed_packet(struct mwifiex_private *priv,
1317			      struct mwifiex_ra_list_tbl *ptr, int ptr_index)
1318				__releases(&priv->wmm.ra_list_spinlock)
1319{
1320	struct mwifiex_tx_param tx_param;
1321	struct mwifiex_adapter *adapter = priv->adapter;
1322	int ret = -1;
1323	struct sk_buff *skb, *skb_next;
1324	struct mwifiex_txinfo *tx_info;
1325
1326	if (skb_queue_empty(&ptr->skb_head)) {
1327		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1328		return;
1329	}
1330
1331	skb = skb_dequeue(&ptr->skb_head);
1332
1333	if (adapter->data_sent || adapter->tx_lock_flag) {
1334		ptr->total_pkt_count--;
1335		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1336		skb_queue_tail(&adapter->tx_data_q, skb);
1337		atomic_dec(&priv->wmm.tx_pkts_queued);
1338		atomic_inc(&adapter->tx_queued);
1339		return;
1340	}
1341
1342	if (!skb_queue_empty(&ptr->skb_head))
1343		skb_next = skb_peek(&ptr->skb_head);
1344	else
1345		skb_next = NULL;
1346
1347	tx_info = MWIFIEX_SKB_TXCB(skb);
1348
1349	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1350
1351	tx_param.next_pkt_len =
1352		((skb_next) ? skb_next->len +
1353		 sizeof(struct txpd) : 0);
1354	if (adapter->iface_type == MWIFIEX_USB) {
1355		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
1356						   skb, &tx_param);
1357	} else {
1358		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1359						   skb, &tx_param);
1360	}
1361
1362	switch (ret) {
1363	case -EBUSY:
1364		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
1365		spin_lock_bh(&priv->wmm.ra_list_spinlock);
1366
1367		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1368			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1369			mwifiex_write_data_complete(adapter, skb, 0, -1);
1370			return;
1371		}
1372
1373		skb_queue_tail(&ptr->skb_head, skb);
1374
1375		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1376		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1377		break;
1378	case -1:
1379		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
1380		adapter->dbg.num_tx_host_to_card_failure++;
1381		mwifiex_write_data_complete(adapter, skb, 0, ret);
1382		break;
1383	case -EINPROGRESS:
1384		break;
1385	case 0:
1386		mwifiex_write_data_complete(adapter, skb, 0, ret);
1387		break;
1388	default:
1389		break;
1390	}
1391	if (ret != -EBUSY) {
1392		mwifiex_rotate_priolists(priv, ptr, ptr_index);
1393		atomic_dec(&priv->wmm.tx_pkts_queued);
1394		spin_lock_bh(&priv->wmm.ra_list_spinlock);
1395		ptr->total_pkt_count--;
1396		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1397	}
1398}
1399
1400/*
1401 * This function dequeues a packet from the highest priority list
1402 * and transmits it.
1403 */
1404static int
1405mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1406{
1407	struct mwifiex_ra_list_tbl *ptr;
1408	struct mwifiex_private *priv = NULL;
1409	int ptr_index = 0;
1410	u8 ra[ETH_ALEN];
1411	int tid_del = 0, tid = 0;
1412
1413	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
1414	if (!ptr)
1415		return -1;
1416
1417	tid = mwifiex_get_tid(ptr);
1418
1419	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
1420
1421	spin_lock_bh(&priv->wmm.ra_list_spinlock);
1422	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1423		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1424		return -1;
1425	}
1426
1427	if (mwifiex_is_ptr_processed(priv, ptr)) {
1428		mwifiex_send_processed_packet(priv, ptr, ptr_index);
1429		/* ra_list_spinlock has been released in
1430		   mwifiex_send_processed_packet() */
1431		return 0;
1432	}
1433
1434	if (!ptr->is_11n_enabled ||
1435		ptr->ba_status ||
1436		priv->wps.session_enable) {
1437		if (ptr->is_11n_enabled &&
1438			ptr->ba_status &&
1439			ptr->amsdu_in_ampdu &&
1440			mwifiex_is_amsdu_allowed(priv, tid) &&
1441			mwifiex_is_11n_aggragation_possible(priv, ptr,
1442							adapter->tx_buf_size))
1443			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
1444			/* ra_list_spinlock has been released in
1445			 * mwifiex_11n_aggregate_pkt()
1446			 */
1447		else
1448			mwifiex_send_single_packet(priv, ptr, ptr_index);
1449			/* ra_list_spinlock has been released in
1450			 * mwifiex_send_single_packet()
1451			 */
1452	} else {
1453		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
1454		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
1455			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1456				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1457						      BA_SETUP_INPROGRESS);
1458				mwifiex_send_addba(priv, tid, ptr->ra);
1459			} else if (mwifiex_find_stream_to_delete
1460				   (priv, tid, &tid_del, ra)) {
1461				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1462						      BA_SETUP_INPROGRESS);
1463				mwifiex_send_delba(priv, tid_del, ra, 1);
1464			}
1465		}
1466		if (mwifiex_is_amsdu_allowed(priv, tid) &&
1467		    mwifiex_is_11n_aggragation_possible(priv, ptr,
1468							adapter->tx_buf_size))
1469			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
1470			/* ra_list_spinlock has been released in
1471			   mwifiex_11n_aggregate_pkt() */
1472		else
1473			mwifiex_send_single_packet(priv, ptr, ptr_index);
1474			/* ra_list_spinlock has been released in
1475			   mwifiex_send_single_packet() */
1476	}
1477	return 0;
1478}
1479
1480void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
1481{
1482	struct mwifiex_tx_param tx_param;
1483	struct sk_buff *skb;
1484	struct mwifiex_txinfo *tx_info;
1485	struct mwifiex_private *priv;
1486	int i;
1487
1488	if (adapter->data_sent || adapter->tx_lock_flag)
1489		return;
1490
1491	for (i = 0; i < adapter->priv_num; ++i) {
1492		priv = adapter->priv[i];
1493
1494		if (!priv)
1495			continue;
1496
1497		if (adapter->if_ops.is_port_ready &&
1498		    !adapter->if_ops.is_port_ready(priv))
1499			continue;
1500
1501		if (skb_queue_empty(&priv->bypass_txq))
1502			continue;
1503
1504		skb = skb_dequeue(&priv->bypass_txq);
1505		tx_info = MWIFIEX_SKB_TXCB(skb);
1506
1507		/* no aggregation for bypass packets */
1508		tx_param.next_pkt_len = 0;
1509
1510		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
1511			skb_queue_head(&priv->bypass_txq, skb);
1512			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1513		} else {
1514			atomic_dec(&adapter->bypass_tx_pending);
1515		}
1516	}
1517}
1518
1519/*
1520 * This function transmits the highest priority packet awaiting in the
1521 * WMM Queues.
1522 */
1523void
1524mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1525{
1526	do {
1527		if (mwifiex_dequeue_tx_packet(adapter))
1528			break;
1529		if (adapter->iface_type != MWIFIEX_SDIO) {
1530			if (adapter->data_sent ||
1531			    adapter->tx_lock_flag)
1532				break;
1533		} else {
1534			if (atomic_read(&adapter->tx_queued) >=
1535			    MWIFIEX_MAX_PKTS_TXQ)
1536				break;
1537		}
1538	} while (!mwifiex_wmm_lists_empty(adapter));
1539}