Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.8.
   1/******************************************************************************
   2 *
   3 * Copyright(c) 2009-2012  Realtek Corporation.
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of version 2 of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc.,
  16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
  17 *
  18 * The full GNU General Public License is included in this distribution in the
  19 * file called LICENSE.
  20 *
  21 * Contact Information:
  22 * wlanfae <wlanfae@realtek.com>
  23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
  24 * Hsinchu 300, Taiwan.
  25 *
  26 * Larry Finger <Larry.Finger@lwfinger.net>
  27 *
  28 *****************************************************************************/
  29
  30#include "wifi.h"
  31#include "core.h"
  32#include "pci.h"
  33#include "base.h"
  34#include "ps.h"
  35#include "efuse.h"
  36#include <linux/export.h>
  37#include <linux/kmemleak.h>
  38#include <linux/module.h>
  39
/* Module metadata for the rtlwifi PCI core. */
MODULE_AUTHOR("lizhaoming	<chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE	<wlanfae@realtek.com>");
MODULE_AUTHOR("Larry Finger	<Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
  45
/* Host bridge vendor IDs the driver recognizes; the array index
 * corresponds to the PCI_BRIDGE_VENDOR_* enumeration used elsewhere
 * (e.g. PCI_BRIDGE_VENDOR_INTEL).
 */
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
	PCI_VENDOR_ID_INTEL,
	PCI_VENDOR_ID_ATI,
	PCI_VENDOR_ID_AMD,
	PCI_VENDOR_ID_SI
};
  52
/* Map a mac80211 access category (queue mapping index) to the
 * corresponding hardware TX queue.
 */
static const u8 ac_to_hwq[] = {
	VO_QUEUE,
	VI_QUEUE,
	BE_QUEUE,
	BK_QUEUE
};
  59
  60static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
  61		       struct sk_buff *skb)
  62{
  63	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
  64	__le16 fc = rtl_get_fc(skb);
  65	u8 queue_index = skb_get_queue_mapping(skb);
  66
  67	if (unlikely(ieee80211_is_beacon(fc)))
  68		return BEACON_QUEUE;
  69	if (ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc))
  70		return MGNT_QUEUE;
  71	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
  72		if (ieee80211_is_nullfunc(fc))
  73			return HIGH_QUEUE;
  74
  75	return ac_to_hwq[queue_index];
  76}
  77
  78/* Update PCI dependent default settings*/
  79static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
  80{
  81	struct rtl_priv *rtlpriv = rtl_priv(hw);
  82	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
  83	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
  84	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
  85	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
  86	u8 init_aspm;
  87
  88	ppsc->reg_rfps_level = 0;
  89	ppsc->support_aspm = false;
  90
  91	/*Update PCI ASPM setting */
  92	ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
  93	switch (rtlpci->const_pci_aspm) {
  94	case 0:
  95		/*No ASPM */
  96		break;
  97
  98	case 1:
  99		/*ASPM dynamically enabled/disable. */
 100		ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
 101		break;
 102
 103	case 2:
 104		/*ASPM with Clock Req dynamically enabled/disable. */
 105		ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
 106					 RT_RF_OFF_LEVL_CLK_REQ);
 107		break;
 108
 109	case 3:
 110		/*
 111		 * Always enable ASPM and Clock Req
 112		 * from initialization to halt.
 113		 * */
 114		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
 115		ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
 116					 RT_RF_OFF_LEVL_CLK_REQ);
 117		break;
 118
 119	case 4:
 120		/*
 121		 * Always enable ASPM without Clock Req
 122		 * from initialization to halt.
 123		 * */
 124		ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
 125					  RT_RF_OFF_LEVL_CLK_REQ);
 126		ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
 127		break;
 128	}
 129
 130	ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
 131
 132	/*Update Radio OFF setting */
 133	switch (rtlpci->const_hwsw_rfoff_d3) {
 134	case 1:
 135		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
 136			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
 137		break;
 138
 139	case 2:
 140		if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
 141			ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
 142		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
 143		break;
 144
 145	case 3:
 146		ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
 147		break;
 148	}
 149
 150	/*Set HW definition to determine if it supports ASPM. */
 151	switch (rtlpci->const_support_pciaspm) {
 152	case 0:{
 153			/*Not support ASPM. */
 154			bool support_aspm = false;
 155			ppsc->support_aspm = support_aspm;
 156			break;
 157		}
 158	case 1:{
 159			/*Support ASPM. */
 160			bool support_aspm = true;
 161			bool support_backdoor = true;
 162			ppsc->support_aspm = support_aspm;
 163
 164			/*if (priv->oem_id == RT_CID_TOSHIBA &&
 165			   !priv->ndis_adapter.amd_l1_patch)
 166			   support_backdoor = false; */
 167
 168			ppsc->support_backdoor = support_backdoor;
 169
 170			break;
 171		}
 172	case 2:
 173		/*ASPM value set by chipset. */
 174		if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
 175			bool support_aspm = true;
 176			ppsc->support_aspm = support_aspm;
 177		}
 178		break;
 179	default:
 180		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 181			 "switch case not processed\n");
 182		break;
 183	}
 184
 185	/* toshiba aspm issue, toshiba will set aspm selfly
 186	 * so we should not set aspm in driver */
 187	pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
 188	if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
 189		init_aspm == 0x43)
 190		ppsc->support_aspm = false;
 191}
 192
 193static bool _rtl_pci_platform_switch_device_pci_aspm(
 194			struct ieee80211_hw *hw,
 195			u8 value)
 196{
 197	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 198	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 199
 200	if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
 201		value |= 0x40;
 202
 203	pci_write_config_byte(rtlpci->pdev, 0x80, value);
 204
 205	return false;
 206}
 207
 208/*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
 209static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
 210{
 211	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 212	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
 213
 214	pci_write_config_byte(rtlpci->pdev, 0x81, value);
 215
 216	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
 217		udelay(100);
 218}
 219
/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	/*Retrieve original configuration settings. */
	u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
	u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
				pcibridge_linkctrlreg;
	u16 aspmlevel = 0;
	u8 tmp_u1b = 0;

	/* Nothing to do when ASPM is not supported on this platform. */
	if (!ppsc->support_aspm)
		return;

	/* Without a recognized host bridge its registers are unknown. */
	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "PCI(Bridge) UNKNOWN\n");

		return;
	}

	/* Turn the clock-request feature off first if it was enabled. */
	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
		_rtl_pci_switch_clk_req(hw, 0x0);
	}

	/*for promising device will in L0 state after an I/O. */
	pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);

	/*Set corresponding value. */
	aspmlevel |= BIT(0) | BIT(1);
	/* Clear the L0s/L1 enable bits in both cached register copies. */
	linkctrl_reg &= ~aspmlevel;
	pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));

	_rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
	udelay(50);

	/*4 Disable Pci Bridge ASPM */
	/* NOTE(review): this writes to the wifi device's own config
	 * space at the bridge's link-control dword offset (num4bytes);
	 * presumably the offsets match -- confirm against the probe code
	 * that fills ndis_adapter.num4bytes.
	 */
	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
			      pcibridge_linkctrlreg);

	udelay(50);
}
 268
/*
 *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
 *power saving We should follow the sequence to enable
 *RTL8192SE first then enable Pci Bridge ASPM
 *or the system will show bluescreen.
 */
static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
	u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
	u16 aspmlevel;
	u8 u_pcibridge_aspmsetting;
	u8 u_device_aspmsetting;

	/* Nothing to do when ASPM is not supported on this platform. */
	if (!ppsc->support_aspm)
		return;

	/* Without a recognized host bridge its registers are unknown. */
	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
		RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
			 "PCI(Bridge) UNKNOWN\n");
		return;
	}

	/*4 Enable Pci Bridge ASPM */

	/* Merge the cached bridge link-control value with the per-chip
	 * host ASPM setting.
	 */
	u_pcibridge_aspmsetting =
	    pcipriv->ndis_adapter.pcibridge_linkctrlreg |
	    rtlpci->const_hostpci_aspm_setting;

	/* Intel bridges: keep L0s (bit 0) disabled. */
	if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
		u_pcibridge_aspmsetting &= ~BIT(0);

	/* NOTE(review): as in rtl_pci_disable_aspm(), this writes the
	 * wifi device's own config space at the bridge link-control
	 * offset -- confirm the offset layout.
	 */
	pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
			      u_pcibridge_aspmsetting);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 "PlatformEnableASPM(): Write reg[%x] = %x\n",
		 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
		 u_pcibridge_aspmsetting);

	udelay(50);

	/*Get ASPM level (with/without Clock Req) */
	aspmlevel = rtlpci->const_devicepci_aspm_setting;
	u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;

	/*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
	/*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */

	u_device_aspmsetting |= aspmlevel;

	_rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);

	/* Re-enable clock request if the power-save policy asks for it. */
	if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
		_rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
					     RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
		RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
	}
	udelay(100);
}
 333
 334static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
 335{
 336	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 337
 338	bool status = false;
 339	u8 offset_e0;
 340	unsigned offset_e4;
 341
 342	pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);
 343
 344	pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0);
 345
 346	if (offset_e0 == 0xA0) {
 347		pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4);
 348		if (offset_e4 & BIT(23))
 349			status = true;
 350	}
 351
 352	return status;
 353}
 354
 355static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
 356				     struct rtl_priv **buddy_priv)
 357{
 358	struct rtl_priv *rtlpriv = rtl_priv(hw);
 359	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
 360	bool find_buddy_priv = false;
 361	struct rtl_priv *tpriv = NULL;
 362	struct rtl_pci_priv *tpcipriv = NULL;
 363
 364	if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
 365		list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
 366				    list) {
 367			if (tpriv) {
 368				tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
 369				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 370					 "pcipriv->ndis_adapter.funcnumber %x\n",
 371					pcipriv->ndis_adapter.funcnumber);
 372				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 373					 "tpcipriv->ndis_adapter.funcnumber %x\n",
 374					tpcipriv->ndis_adapter.funcnumber);
 375
 376				if ((pcipriv->ndis_adapter.busnumber ==
 377				     tpcipriv->ndis_adapter.busnumber) &&
 378				    (pcipriv->ndis_adapter.devnumber ==
 379				    tpcipriv->ndis_adapter.devnumber) &&
 380				    (pcipriv->ndis_adapter.funcnumber !=
 381				    tpcipriv->ndis_adapter.funcnumber)) {
 382					find_buddy_priv = true;
 383					break;
 384				}
 385			}
 386		}
 387	}
 388
 389	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
 390		 "find_buddy_priv %d\n", find_buddy_priv);
 391
 392	if (find_buddy_priv)
 393		*buddy_priv = tpriv;
 394
 395	return find_buddy_priv;
 396}
 397
 398static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
 399{
 400	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
 401	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
 402	u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
 403	u8 linkctrl_reg;
 404	u8 num4bbytes;
 405
 406	num4bbytes = (capabilityoffset + 0x10) / 4;
 407
 408	/*Read  Link Control Register */
 409	pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
 410
 411	pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
 412}
 413
 414static void rtl_pci_parse_configuration(struct pci_dev *pdev,
 415		struct ieee80211_hw *hw)
 416{
 417	struct rtl_priv *rtlpriv = rtl_priv(hw);
 418	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
 419
 420	u8 tmp;
 421	u16 linkctrl_reg;
 422
 423	/*Link Control Register */
 424	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
 425	pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
 426
 427	RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
 428		 pcipriv->ndis_adapter.linkctrl_reg);
 429
 430	pci_read_config_byte(pdev, 0x98, &tmp);
 431	tmp |= BIT(4);
 432	pci_write_config_byte(pdev, 0x98, tmp);
 433
 434	tmp = 0x17;
 435	pci_write_config_byte(pdev, 0x70f, tmp);
 436}
 437
 438static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
 439{
 440	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 441
 442	_rtl_pci_update_default_setting(hw);
 443
 444	if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
 445		/*Always enable ASPM & Clock Req. */
 446		rtl_pci_enable_aspm(hw);
 447		RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
 448	}
 449
 450}
 451
 452static void _rtl_pci_io_handler_init(struct device *dev,
 453				     struct ieee80211_hw *hw)
 454{
 455	struct rtl_priv *rtlpriv = rtl_priv(hw);
 456
 457	rtlpriv->io.dev = dev;
 458
 459	rtlpriv->io.write8_async = pci_write8_async;
 460	rtlpriv->io.write16_async = pci_write16_async;
 461	rtlpriv->io.write32_async = pci_write32_async;
 462
 463	rtlpriv->io.read8_sync = pci_read8_sync;
 464	rtlpriv->io.read16_sync = pci_read16_sync;
 465	rtlpriv->io.read32_sync = pci_read32_sync;
 466
 467}
 468
/* Fill tcb_desc->empkt_len[] / empkt_num with the on-air lengths of
 * the AMPDU frames queued on this TID's wait queue, so the hardware
 * can aggregate them via early mode.  Walks the queue under
 * waitq_lock and stops at the first non-AMPDU frame, at the end of
 * the queue, or at the hardware's early-mode limit.
 *
 * Always returns true.
 */
static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
		struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct sk_buff *next_skb;
	u8 additionlen = FCS_LEN;

	/* here open is 4, wep/tkip is 8, aes is 12*/
	if (info->control.hw_key)
		additionlen += info->control.hw_key->icv_len;

	/* The most skb num is 6 */
	tcb_desc->empkt_num = 0;
	spin_lock_bh(&rtlpriv->locks.waitq_lock);
	skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
		struct ieee80211_tx_info *next_info;

		next_info = IEEE80211_SKB_CB(next_skb);
		if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
			tcb_desc->empkt_len[tcb_desc->empkt_num] =
				next_skb->len + additionlen;
			tcb_desc->empkt_num++;
		} else {
			/* First non-AMPDU frame ends the burst. */
			break;
		}

		if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
				      next_skb))
			break;

		/* Respect the hardware's early-mode aggregation limit. */
		if (tcb_desc->empkt_num >= rtlhal->max_earlymode_num)
			break;
	}
	spin_unlock_bh(&rtlpriv->locks.waitq_lock);

	return true;
}
 508
/* just for early mode now: drain the per-TID software wait queues and
 * push frames to the hardware while the radio is on and the TX ring
 * has room for an early-mode burst.
 */
static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct sk_buff *skb = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	int tid;

	if (!rtlpriv->rtlhal.earlymode_enable)
		return;

	/* Skip while an easy-concurrent mode switch is in progress on
	 * either this adapter or its buddy.
	 */
	if (rtlpriv->dm.supp_phymode_switch &&
	    (rtlpriv->easy_concurrent_ctl.switch_in_process ||
	    (rtlpriv->buddy_priv &&
	    rtlpriv->buddy_priv->easy_concurrent_ctl.switch_in_process)))
		return;
	/* we juse use em for BE/BK/VI/VO */
	for (tid = 7; tid >= 0; tid--) {
		u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
		while (!mac->act_scanning &&
		       rtlpriv->psc.rfpwr_state == ERFON) {
			struct rtl_tcb_desc tcb_desc;
			memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));

			/* Dequeue only while the ring keeps enough free
			 * descriptors for a full early-mode burst;
			 * otherwise leave the rest for the next pass.
			 */
			spin_lock_bh(&rtlpriv->locks.waitq_lock);
			if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
			    (ring->entries - skb_queue_len(&ring->queue) >
			     rtlhal->max_earlymode_num)) {
				skb = skb_dequeue(&mac->skb_waitq[tid]);
			} else {
				spin_unlock_bh(&rtlpriv->locks.waitq_lock);
				break;
			}
			spin_unlock_bh(&rtlpriv->locks.waitq_lock);

			/* Some macaddr can't do early mode. like
			 * multicast/broadcast/no_qos data */
			info = IEEE80211_SKB_CB(skb);
			if (info->flags & IEEE80211_TX_CTL_AMPDU)
				_rtl_update_earlymode_info(hw, skb,
							   &tcb_desc, tid);

			rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
		}
	}
}
 559
 560
/* TX completion handler for one hardware queue: reclaim finished
 * descriptors, DMA-unmap completed frames and report their status to
 * mac80211, then wake the queue once enough descriptors are free.
 */
static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];

	while (skb_queue_len(&ring->queue)) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		struct sk_buff *skb;
		struct ieee80211_tx_info *info;
		__le16 fc;
		u8 tid;

		u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
							  HW_DESC_OWN);

		/*beacon packet will only use the first
		 *descriptor by defaut, and the own may not
		 *be cleared by the hardware
		 */
		if (own)
			return;
		ring->idx = (ring->idx + 1) % ring->entries;

		skb = __skb_dequeue(&ring->queue);
		pci_unmap_single(rtlpci->pdev,
				 rtlpriv->cfg->ops->
					     get_desc((u8 *) entry, true,
						      HW_DESC_TXBUFF_ADDR),
				 skb->len, PCI_DMA_TODEVICE);

		/* remove early mode header */
		if (rtlpriv->rtlhal.earlymode_enable)
			skb_pull(skb, EM_HDR_LEN);

		RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
			 "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
			 ring->idx,
			 skb_queue_len(&ring->queue),
			 *(u16 *) (skb->data + 22));

		/* Firmware command frames carry no TX status to report. */
		if (prio == TXCMD_QUEUE) {
			dev_kfree_skb(skb);
			goto tx_status_ok;

		}

		/* for sw LPS, just after NULL skb send out, we can
		 * sure AP knows we are sleeping, we should not let
		 * rf sleep
		 */
		fc = rtl_get_fc(skb);
		if (ieee80211_is_nullfunc(fc)) {
			if (ieee80211_has_pm(fc)) {
				rtlpriv->mac80211.offchan_delay = true;
				rtlpriv->psc.state_inap = true;
			} else {
				rtlpriv->psc.state_inap = false;
			}
		}
		/* SMPS action frames are consumed here instead of being
		 * reported back to mac80211.
		 */
		if (ieee80211_is_action(fc)) {
			struct ieee80211_mgmt *action_frame =
				(struct ieee80211_mgmt *)skb->data;
			if (action_frame->u.action.u.ht_smps.action ==
			    WLAN_HT_ACTION_SMPS) {
				dev_kfree_skb(skb);
				goto tx_status_ok;
			}
		}

		/* update tid tx pkt num */
		tid = rtl_get_tid(skb);
		if (tid <= 7)
			rtlpriv->link_info.tidtx_inperiod[tid]++;

		info = IEEE80211_SKB_CB(skb);
		ieee80211_tx_info_clear_status(info);

		info->flags |= IEEE80211_TX_STAT_ACK;
		/*info->status.rates[0].count = 1; */

		ieee80211_tx_status_irqsafe(hw, skb);

		/* Wake the queue once exactly two descriptors become
		 * free; presumably it was stopped when the ring filled
		 * -- confirm against the TX path's stop condition.
		 */
		if ((ring->entries - skb_queue_len(&ring->queue))
				== 2) {

			RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
				 "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%d\n",
				 prio, ring->idx,
				 skb_queue_len(&ring->queue));

			ieee80211_wake_queue(hw,
					skb_get_queue_mapping
					(skb));
		}
tx_status_ok:
		skb = NULL;
	}

	/* Enough traffic this period: schedule leaving power save. */
	if (((rtlpriv->link_info.num_rx_inperiod +
		rtlpriv->link_info.num_tx_inperiod) > 8) ||
		(rtlpriv->link_info.num_rx_inperiod > 2)) {
		rtlpriv->enter_ps = false;
		schedule_work(&rtlpriv->works.lps_change_work);
	}
}
 668
/* Deliver one received frame to mac80211.
 *
 * Updates RX statistics, feeds the frame to the driver's beacon/P2P/
 * power-save trackers, then copies it into a freshly allocated skb
 * and hands the copy up via ieee80211_rx_irqsafe(); the original
 * buffer is recycled by the caller.
 *
 * NOTE(review): rx_status is passed by value, so the struct is copied
 * on every call; a pointer parameter would avoid that -- confirm with
 * the caller before changing the signature.
 */
static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
			     struct ieee80211_rx_status rx_status)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
	__le16 fc = rtl_get_fc(skb);
	bool unicast = false;
	struct sk_buff *uskb = NULL;
	u8 *pdata;


	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));

	if (is_broadcast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else if (is_multicast_ether_addr(hdr->addr1)) {
		;/*TODO*/
	} else {
		unicast = true;
		rtlpriv->stats.rxbytesunicast += skb->len;
	}

	if (ieee80211_is_data(fc)) {
		rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);

		if (unicast)
			rtlpriv->link_info.num_rx_inperiod++;
	}

	/* static bcn for roaming */
	rtl_beacon_statistic(hw, skb);
	rtl_p2p_info(hw, (void *)skb->data, skb->len);

	/* for sw lps */
	rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
	rtl_recognize_peer(hw, (void *)skb->data, skb->len);
	/* In AP mode on 2.4 GHz, beacons and probe responses are not
	 * forwarded to mac80211.
	 */
	if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
	    (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
	     (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
		return;

	if (unlikely(!rtl_action_proc(hw, skb, false)))
		return;

	/* Copy the frame into a new skb (with headroom) for mac80211. */
	uskb = dev_alloc_skb(skb->len + 128);
	if (!uskb)
		return;		/* exit if allocation failed */
	memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
	pdata = (u8 *)skb_put(uskb, skb->len);
	memcpy(pdata, skb->data, skb->len);

	ieee80211_rx_irqsafe(hw, uskb);
}
 722
/* RX interrupt bottom half: walk the RX descriptor ring, hand each
 * filled frame to _rtl_receive_one(), replace the consumed buffer
 * with a freshly mapped one and give the descriptor back to the
 * hardware.
 */
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;

	struct ieee80211_rx_status rx_status = { 0 };
	unsigned int count = rtlpci->rxringcount;
	u8 own;
	u8 tmp_one;
	u32 bufferaddress;

	struct rtl_stats stats = {
		.signal = 0,
		.rate = 0,
	};
	int index = rtlpci->rx_ring[rx_queue_idx].idx;

	if (rtlpci->driver_is_goingto_unload)
		return;
	/*RX NORMAL PKT */
	while (count--) {
		/*rx descriptor */
		struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
				index];
		/*rx pkt */
		struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
				index];
		struct sk_buff *new_skb = NULL;

		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
						       false, HW_DESC_OWN);

		/*wait data to be filled by hardware */
		if (own)
			break;

		rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
						 &rx_status,
						 (u8 *) pdesc, skb);

		/* Bad frame: recycle the existing buffer in place. */
		if (stats.crc || stats.hwerror)
			goto done;

		new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
		if (unlikely(!new_skb)) {
			RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), DBG_DMESG,
				 "can't alloc skb for rx\n");
			goto done;
		}
		kmemleak_not_leak(new_skb);

		/* The DMA address was stashed in skb->cb at map time. */
		pci_unmap_single(rtlpci->pdev,
				 *((dma_addr_t *) skb->cb),
				 rtlpci->rxbuffersize,
				 PCI_DMA_FROMDEVICE);

		skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
			HW_DESC_RXPKT_LEN));
		skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);

		/*
		 * NOTICE This can not be use for mac80211,
		 * this is done in mac80211 code,
		 * if you done here sec DHCP will fail
		 * skb_trim(skb, skb->len - 4);
		 */

		_rtl_receive_one(hw, skb, rx_status);

		/* Enough traffic this period: schedule leaving power
		 * save.
		 */
		if (((rtlpriv->link_info.num_rx_inperiod +
		      rtlpriv->link_info.num_tx_inperiod) > 8) ||
		      (rtlpriv->link_info.num_rx_inperiod > 2)) {
			rtlpriv->enter_ps = false;
			schedule_work(&rtlpriv->works.lps_change_work);
		}

		dev_kfree_skb_any(skb);
		skb = new_skb;

		/* Map the replacement buffer and install it in the ring. */
		rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
		*((dma_addr_t *) skb->cb) =
			    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
					   rtlpci->rxbuffersize,
					   PCI_DMA_FROMDEVICE);

done:
		bufferaddress = (*((dma_addr_t *)skb->cb));
		/* NOTE(review): returning here on a mapping error leaves
		 * ring->idx stale (it is only written back after the
		 * loop) -- confirm this is intentional.
		 */
		if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
			return;
		tmp_one = 1;
		/* Rearm the descriptor: buffer address, buffer size, EOR
		 * on the last descriptor, then return ownership to HW.
		 */
		rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
					    HW_DESC_RXBUFF_ADDR,
					    (u8 *)&bufferaddress);
		rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
					    HW_DESC_RXPKT_LEN,
					    (u8 *)&rtlpci->rxbuffersize);

		if (index == rtlpci->rxringcount - 1)
			rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false,
						    HW_DESC_RXERO,
						    &tmp_one);

		rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, false, HW_DESC_RXOWN,
					    &tmp_one);

		index = (index + 1) % rtlpci->rxringcount;
	}

	rtlpci->rx_ring[rx_queue_idx].idx = index;
}
 834
/* Top-level PCI interrupt handler: reads and acknowledges the
 * interrupt status registers, then dispatches to the beacon, TX
 * completion, RX and firmware sub-handlers under irq_th_lock.
 *
 * Returns IRQ_NONE when the interrupt was not ours (shared IRQ line
 * or hardware gone), IRQ_HANDLED otherwise.
 */
static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u32 inta = 0;
	u32 intb = 0;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	/*read ISR: 4/8bytes */
	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);

	/*Shared IRQ or HW disappeared */
	if (!inta || inta == 0xffff) {
		ret = IRQ_NONE;
		goto done;
	}

	/*<1> beacon related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "beacon ok interrupt!\n");
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "beacon err interrupt!\n");
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "prepare beacon for interrupt!\n");
		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
	}

	/*<2> Tx related */
	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");

	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "Manage ok interrupt!\n");
		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "HIGH_QUEUE ok interrupt!\n");
		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "BK Tx OK interrupt!\n");
		_rtl_pci_tx_isr(hw, BK_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "BE TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, BE_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "VI TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, VI_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 "Vo TX OK interrupt!\n");
		_rtl_pci_tx_isr(hw, VO_QUEUE);
	}

	/* The 92SE has a separate firmware command queue. */
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
			rtlpriv->link_info.num_tx_inperiod++;

			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
				 "CMD TX OK interrupt!\n");
			_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
		}
	}

	/*<3> Rx related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
		_rtl_pci_rx_interrupt(hw);
	}

	/* Descriptor shortage / FIFO overflow: drain the RX ring too. */
	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "rx descriptor unavailable!\n");
		_rtl_pci_rx_interrupt(hw);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
		_rtl_pci_rx_interrupt(hw);
	}

	/*fw related*/
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
		if (inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
			RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
				 "firmware interrupt!\n");
			queue_delayed_work(rtlpriv->works.rtl_wq,
					   &rtlpriv->works.fwevt_wq, 0);
		}
	}

	/* Kick the early-mode wait-queue tasklet outside IRQ context. */
	if (rtlpriv->rtlhal.earlymode_enable)
		tasklet_schedule(&rtlpriv->works.irq_tasklet);

done:
	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return ret;
}
 969
/* IRQ tasklet: drain the early-mode wait queues outside hard-IRQ
 * context.
 */
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
	_rtl_pci_tx_chk_waitq(hw);
}
 974
/* Beacon tasklet: unmap and free the previously queued beacon (if
 * any), fetch a fresh beacon from mac80211, fill the first descriptor
 * of the beacon ring and hand ownership to the hardware.
 */
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl8192_tx_ring *ring = NULL;
	struct ieee80211_hdr *hdr = NULL;
	struct ieee80211_tx_info *info = NULL;
	struct sk_buff *pskb = NULL;
	struct rtl_tx_desc *pdesc = NULL;
	struct rtl_tcb_desc tcb_desc;
	/*This is for new trx flow*/
	/* NOTE(review): pbuffer_desc stays NULL here and is passed to
	 * fill_tx_desc() as-is -- presumably only used by chips with
	 * the new TRX flow; confirm in the per-chip fill_tx_desc.
	 */
	struct rtl_tx_buffer_desc *pbuffer_desc = NULL;
	u8 temp_one = 1;

	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
	ring = &rtlpci->tx_ring[BEACON_QUEUE];
	pskb = __skb_dequeue(&ring->queue);
	/* Release the previous beacon still sitting on the ring. */
	if (pskb) {
		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
		pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
				 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
				 pskb->len, PCI_DMA_TODEVICE);
		kfree_skb(pskb);
	}

	/*NB: the beacon data buffer must be 32-bit aligned. */
	pskb = ieee80211_beacon_get(hw, mac->vif);
	if (pskb == NULL)
		return;
	hdr = rtl_get_hdr(pskb);
	info = IEEE80211_SKB_CB(pskb);
	pdesc = &ring->desc[0];
	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
					(u8 *)pbuffer_desc, info, NULL, pskb,
					BEACON_QUEUE, &tcb_desc);

	__skb_queue_tail(&ring->queue, pskb);

	/* Hand the descriptor to the hardware. */
	rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
				    &temp_one);

	return;
}
1019
1020static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
1021{
1022	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1023	u8 i;
1024
1025	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1026		rtlpci->txringcount[i] = RT_TXDESC_NUM;
1027
1028	/*
1029	 *we just alloc 2 desc for beacon queue,
1030	 *because we just need first desc in hw beacon.
1031	 */
1032	rtlpci->txringcount[BEACON_QUEUE] = 2;
1033
1034	/*
1035	 *BE queue need more descriptor for performance
1036	 *consideration or, No more tx desc will happen,
1037	 *and may cause mac80211 mem leakage.
1038	 */
1039	rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
1040
1041	rtlpci->rxbuffersize = 9100;	/*2048/1024; */
1042	rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT;	/*64; */
1043}
1044
1045static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
1046		struct pci_dev *pdev)
1047{
1048	struct rtl_priv *rtlpriv = rtl_priv(hw);
1049	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1050	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1051	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1052
1053	rtlpci->up_first_time = true;
1054	rtlpci->being_init_adapter = false;
1055
1056	rtlhal->hw = hw;
1057	rtlpci->pdev = pdev;
1058
1059	/*Tx/Rx related var */
1060	_rtl_pci_init_trx_var(hw);
1061
1062	/*IBSS*/ mac->beacon_interval = 100;
1063
1064	/*AMPDU*/
1065	mac->min_space_cfg = 0;
1066	mac->max_mss_density = 0;
1067	/*set sane AMPDU defaults */
1068	mac->current_ampdu_density = 7;
1069	mac->current_ampdu_factor = 3;
1070
1071	/*QOS*/
1072	rtlpci->acm_method = EACMWAY2_SW;
1073
1074	/*task */
1075	tasklet_init(&rtlpriv->works.irq_tasklet,
1076		     (void (*)(unsigned long))_rtl_pci_irq_tasklet,
1077		     (unsigned long)hw);
1078	tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1079		     (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
1080		     (unsigned long)hw);
1081	INIT_WORK(&rtlpriv->works.lps_change_work,
1082		  rtl_lps_change_work_callback);
1083}
1084
1085static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1086				 unsigned int prio, unsigned int entries)
1087{
1088	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1089	struct rtl_priv *rtlpriv = rtl_priv(hw);
1090	struct rtl_tx_desc *ring;
1091	dma_addr_t dma;
1092	u32 nextdescaddress;
1093	int i;
1094
1095	ring = pci_alloc_consistent(rtlpci->pdev,
1096				    sizeof(*ring) * entries, &dma);
1097
1098	if (!ring || (unsigned long)ring & 0xFF) {
1099		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1100			 "Cannot allocate TX ring (prio = %d)\n", prio);
1101		return -ENOMEM;
1102	}
1103
1104	memset(ring, 0, sizeof(*ring) * entries);
1105	rtlpci->tx_ring[prio].desc = ring;
1106	rtlpci->tx_ring[prio].dma = dma;
1107	rtlpci->tx_ring[prio].idx = 0;
1108	rtlpci->tx_ring[prio].entries = entries;
1109	skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
1110
1111	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
1112		 prio, ring);
1113
1114	for (i = 0; i < entries; i++) {
1115		nextdescaddress = (u32) dma +
1116					      ((i + 1) % entries) *
1117					      sizeof(*ring);
1118
1119		rtlpriv->cfg->ops->set_desc(hw, (u8 *)&(ring[i]),
1120					    true, HW_DESC_TX_NEXTDESC_ADDR,
1121					    (u8 *)&nextdescaddress);
1122	}
1123
1124	return 0;
1125}
1126
1127static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1128{
1129	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1130	struct rtl_priv *rtlpriv = rtl_priv(hw);
1131	struct rtl_rx_desc *entry = NULL;
1132	int i, rx_queue_idx;
1133	u8 tmp_one = 1;
1134
1135	/*
1136	 *rx_queue_idx 0:RX_MPDU_QUEUE
1137	 *rx_queue_idx 1:RX_CMD_QUEUE
1138	 */
1139	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1140	     rx_queue_idx++) {
1141		rtlpci->rx_ring[rx_queue_idx].desc =
1142		    pci_alloc_consistent(rtlpci->pdev,
1143					 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1144						desc) * rtlpci->rxringcount,
1145					 &rtlpci->rx_ring[rx_queue_idx].dma);
1146
1147		if (!rtlpci->rx_ring[rx_queue_idx].desc ||
1148		    (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
1149			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1150				 "Cannot allocate RX ring\n");
1151			return -ENOMEM;
1152		}
1153
1154		memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
1155		       sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
1156		       rtlpci->rxringcount);
1157
1158		rtlpci->rx_ring[rx_queue_idx].idx = 0;
1159
1160		/* If amsdu_8k is disabled, set buffersize to 4096. This
1161		 * change will reduce memory fragmentation.
1162		 */
1163		if (rtlpci->rxbuffersize > 4096 &&
1164		    rtlpriv->rtlhal.disable_amsdu_8k)
1165			rtlpci->rxbuffersize = 4096;
1166
1167		for (i = 0; i < rtlpci->rxringcount; i++) {
1168			struct sk_buff *skb =
1169			    dev_alloc_skb(rtlpci->rxbuffersize);
1170			u32 bufferaddress;
1171			if (!skb)
1172				return 0;
1173			kmemleak_not_leak(skb);
1174			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1175
1176			/*skb->dev = dev; */
1177
1178			rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
1179
1180			/*
1181			 *just set skb->cb to mapping addr
1182			 *for pci_unmap_single use
1183			 */
1184			*((dma_addr_t *) skb->cb) =
1185			    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
1186					   rtlpci->rxbuffersize,
1187					   PCI_DMA_FROMDEVICE);
1188
1189			bufferaddress = (*((dma_addr_t *)skb->cb));
1190			if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) {
1191				dev_kfree_skb_any(skb);
1192				return 1;
1193			}
1194			rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1195						    HW_DESC_RXBUFF_ADDR,
1196						    (u8 *)&bufferaddress);
1197			rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1198						    HW_DESC_RXPKT_LEN,
1199						    (u8 *)&rtlpci->
1200						    rxbuffersize);
1201			rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1202						    HW_DESC_RXOWN,
1203						    &tmp_one);
1204		}
1205
1206		rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
1207					    HW_DESC_RXERO, &tmp_one);
1208	}
1209	return 0;
1210}
1211
1212static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1213		unsigned int prio)
1214{
1215	struct rtl_priv *rtlpriv = rtl_priv(hw);
1216	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1217	struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1218
1219	while (skb_queue_len(&ring->queue)) {
1220		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
1221		struct sk_buff *skb = __skb_dequeue(&ring->queue);
1222
1223		pci_unmap_single(rtlpci->pdev,
1224				 rtlpriv->cfg->
1225					     ops->get_desc((u8 *) entry, true,
1226						   HW_DESC_TXBUFF_ADDR),
1227				 skb->len, PCI_DMA_TODEVICE);
1228		kfree_skb(skb);
1229		ring->idx = (ring->idx + 1) % ring->entries;
1230	}
1231
1232	if (ring->desc) {
1233		pci_free_consistent(rtlpci->pdev,
1234				    sizeof(*ring->desc) * ring->entries,
1235				    ring->desc, ring->dma);
1236		ring->desc = NULL;
1237	}
1238}
1239
1240static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
1241{
1242	int i, rx_queue_idx;
1243
1244	/*rx_queue_idx 0:RX_MPDU_QUEUE */
1245	/*rx_queue_idx 1:RX_CMD_QUEUE */
1246	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1247	     rx_queue_idx++) {
1248		for (i = 0; i < rtlpci->rxringcount; i++) {
1249			struct sk_buff *skb =
1250			    rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
1251			if (!skb)
1252				continue;
1253
1254			pci_unmap_single(rtlpci->pdev,
1255					 *((dma_addr_t *) skb->cb),
1256					 rtlpci->rxbuffersize,
1257					 PCI_DMA_FROMDEVICE);
1258			kfree_skb(skb);
1259		}
1260
1261		if (rtlpci->rx_ring[rx_queue_idx].desc) {
1262			pci_free_consistent(rtlpci->pdev,
1263				    sizeof(*rtlpci->rx_ring[rx_queue_idx].
1264					   desc) * rtlpci->rxringcount,
1265				    rtlpci->rx_ring[rx_queue_idx].desc,
1266				    rtlpci->rx_ring[rx_queue_idx].dma);
1267			rtlpci->rx_ring[rx_queue_idx].desc = NULL;
1268		}
1269	}
1270}
1271
1272static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1273{
1274	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1275	int ret;
1276	int i;
1277
1278	ret = _rtl_pci_init_rx_ring(hw);
1279	if (ret)
1280		return ret;
1281
1282	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1283		ret = _rtl_pci_init_tx_ring(hw, i,
1284				 rtlpci->txringcount[i]);
1285		if (ret)
1286			goto err_free_rings;
1287	}
1288
1289	return 0;
1290
1291err_free_rings:
1292	_rtl_pci_free_rx_ring(rtlpci);
1293
1294	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1295		if (rtlpci->tx_ring[i].desc)
1296			_rtl_pci_free_tx_ring(hw, i);
1297
1298	return 1;
1299}
1300
1301static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1302{
1303	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1304	u32 i;
1305
1306	/*free rx rings */
1307	_rtl_pci_free_rx_ring(rtlpci);
1308
1309	/*free tx rings */
1310	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1311		_rtl_pci_free_tx_ring(hw, i);
1312
1313	return 0;
1314}
1315
/*
 * Reset the TX/RX rings after a hardware (re)start: hand every RX
 * descriptor back to the hardware, drop any TX frame still pending,
 * and rewind every ring index to zero.  Always returns 0.
 */
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int i, rx_queue_idx;
	unsigned long flags;
	u8 tmp_one = 1;

	/*rx_queue_idx 0:RX_MPDU_QUEUE */
	/*rx_queue_idx 1:RX_CMD_QUEUE */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		/*
		 *force the rx_ring[RX_MPDU_QUEUE/
		 *RX_CMD_QUEUE].idx to the first one
		 */
		if (rtlpci->rx_ring[rx_queue_idx].desc) {
			struct rtl_rx_desc *entry = NULL;

			/* give every RX descriptor back to the hardware */
			for (i = 0; i < rtlpci->rxringcount; i++) {
				entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
				rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry,
							    false,
							    HW_DESC_RXOWN,
							    &tmp_one);
			}
			rtlpci->rx_ring[rx_queue_idx].idx = 0;
		}
	}

	/*
	 *after reset, release previous pending packet,
	 *and force the  tx idx to the first one
	 */
	for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
		if (rtlpci->tx_ring[i].desc) {
			struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];

			while (skb_queue_len(&ring->queue)) {
				struct rtl_tx_desc *entry;
				struct sk_buff *skb;

				/* the irq_th_lock is taken per iteration
				 * so that kfree_skb() runs outside of it
				 */
				spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
						  flags);
				entry = &ring->desc[ring->idx];
				skb = __skb_dequeue(&ring->queue);
				pci_unmap_single(rtlpci->pdev,
						 rtlpriv->cfg->ops->
							 get_desc((u8 *)
							 entry,
							 true,
							 HW_DESC_TXBUFF_ADDR),
						 skb->len, PCI_DMA_TODEVICE);
				ring->idx = (ring->idx + 1) % ring->entries;
				spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
						  flags);
				kfree_skb(skb);
			}
			ring->idx = 0;
		}
	}

	return 0;
}
1380
1381static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1382					struct ieee80211_sta *sta,
1383					struct sk_buff *skb)
1384{
1385	struct rtl_priv *rtlpriv = rtl_priv(hw);
1386	struct rtl_sta_info *sta_entry = NULL;
1387	u8 tid = rtl_get_tid(skb);
1388	__le16 fc = rtl_get_fc(skb);
1389
1390	if (!sta)
1391		return false;
1392	sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1393
1394	if (!rtlpriv->rtlhal.earlymode_enable)
1395		return false;
1396	if (ieee80211_is_nullfunc(fc))
1397		return false;
1398	if (ieee80211_is_qos_nullfunc(fc))
1399		return false;
1400	if (ieee80211_is_pspoll(fc))
1401		return false;
1402	if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1403		return false;
1404	if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1405		return false;
1406	if (tid > 7)
1407		return false;
1408
1409	/* maybe every tid should be checked */
1410	if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1411		return false;
1412
1413	spin_lock_bh(&rtlpriv->locks.waitq_lock);
1414	skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1415	spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1416
1417	return true;
1418}
1419
1420static int rtl_pci_tx(struct ieee80211_hw *hw,
1421		      struct ieee80211_sta *sta,
1422		      struct sk_buff *skb,
1423		      struct rtl_tcb_desc *ptcb_desc)
1424{
1425	struct rtl_priv *rtlpriv = rtl_priv(hw);
1426	struct rtl_sta_info *sta_entry = NULL;
1427	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1428	struct rtl8192_tx_ring *ring;
1429	struct rtl_tx_desc *pdesc;
1430	struct rtl_tx_buffer_desc *ptx_bd_desc = NULL;
1431	u8 idx;
1432	u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
1433	unsigned long flags;
1434	struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
1435	__le16 fc = rtl_get_fc(skb);
1436	u8 *pda_addr = hdr->addr1;
1437	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1438	/*ssn */
1439	u8 tid = 0;
1440	u16 seq_number = 0;
1441	u8 own;
1442	u8 temp_one = 1;
1443
1444	if (ieee80211_is_mgmt(fc))
1445		rtl_tx_mgmt_proc(hw, skb);
1446
1447	if (rtlpriv->psc.sw_ps_enabled) {
1448		if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
1449			!ieee80211_has_pm(fc))
1450			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
1451	}
1452
1453	rtl_action_proc(hw, skb, true);
1454
1455	if (is_multicast_ether_addr(pda_addr))
1456		rtlpriv->stats.txbytesmulticast += skb->len;
1457	else if (is_broadcast_ether_addr(pda_addr))
1458		rtlpriv->stats.txbytesbroadcast += skb->len;
1459	else
1460		rtlpriv->stats.txbytesunicast += skb->len;
1461
1462	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1463	ring = &rtlpci->tx_ring[hw_queue];
1464	if (hw_queue != BEACON_QUEUE)
1465		idx = (ring->idx + skb_queue_len(&ring->queue)) %
1466				ring->entries;
1467	else
1468		idx = 0;
1469
1470	pdesc = &ring->desc[idx];
1471	if (rtlpriv->use_new_trx_flow) {
1472		ptx_bd_desc = &ring->buffer_desc[idx];
1473	} else {
1474		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *)pdesc,
1475				true, HW_DESC_OWN);
1476
1477		if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
1478			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1479				 "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
1480				 hw_queue, ring->idx, idx,
1481				 skb_queue_len(&ring->queue));
1482
1483			spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1484					       flags);
1485			return skb->len;
1486		}
1487	}
1488
1489	if (ieee80211_is_data_qos(fc)) {
1490		tid = rtl_get_tid(skb);
1491		if (sta) {
1492			sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1493			seq_number = (le16_to_cpu(hdr->seq_ctrl) &
1494				      IEEE80211_SCTL_SEQ) >> 4;
1495			seq_number += 1;
1496
1497			if (!ieee80211_has_morefrags(hdr->frame_control))
1498				sta_entry->tids[tid].seq_number = seq_number;
1499		}
1500	}
1501
1502	if (ieee80211_is_data(fc))
1503		rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1504
1505	rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1506			(u8 *)ptx_bd_desc, info, sta, skb, hw_queue, ptcb_desc);
1507
1508	__skb_queue_tail(&ring->queue, skb);
1509
1510	if (rtlpriv->use_new_trx_flow) {
1511		rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1512					    HW_DESC_OWN, &hw_queue);
1513	} else {
1514		rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true,
1515					    HW_DESC_OWN, &temp_one);
1516	}
1517
1518	if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1519	    hw_queue != BEACON_QUEUE) {
1520		RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
1521			 "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
1522			 hw_queue, ring->idx, idx,
1523			 skb_queue_len(&ring->queue));
1524
1525		ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
1526	}
1527
1528	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1529
1530	rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
1531
1532	return 0;
1533}
1534
/*
 * Wait for the hardware TX queues to drain (e.g. before a channel
 * switch).  NOTE(review): the @drop argument is ignored — frames are
 * always waited for, never dropped.
 */
static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u16 i = 0;
	int queue_id;
	struct rtl8192_tx_ring *ring;

	if (mac->skip_scan)
		return;

	for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
		u32 queue_len;
		ring = &pcipriv->dev.tx_ring[queue_id];
		queue_len = skb_queue_len(&ring->queue);
		/* beacon and command queues are never waited for */
		if (queue_len == 0 || queue_id == BEACON_QUEUE ||
			queue_id == TXCMD_QUEUE) {
			queue_id--;
			continue;
		} else {
			msleep(20);
			i++;
		}

		/* give up after 200 * 20 ms = 4 s in total, or as soon as
		 * the radio is off or the HAL has stopped
		 */
		if (rtlpriv->psc.rfpwr_state == ERFOFF ||
			is_hal_stop(rtlhal) || i >= 200)
			return;
	}
}
1567
1568static void rtl_pci_deinit(struct ieee80211_hw *hw)
1569{
1570	struct rtl_priv *rtlpriv = rtl_priv(hw);
1571	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1572
1573	_rtl_pci_deinit_trx_ring(hw);
1574
1575	synchronize_irq(rtlpci->pdev->irq);
1576	tasklet_kill(&rtlpriv->works.irq_tasklet);
1577	cancel_work_sync(&rtlpriv->works.lps_change_work);
1578
1579	flush_workqueue(rtlpriv->works.rtl_wq);
1580	destroy_workqueue(rtlpriv->works.rtl_wq);
1581
1582}
1583
1584static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1585{
1586	struct rtl_priv *rtlpriv = rtl_priv(hw);
1587	int err;
1588
1589	_rtl_pci_init_struct(hw, pdev);
1590
1591	err = _rtl_pci_init_trx_ring(hw);
1592	if (err) {
1593		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1594			 "tx ring initialization failed\n");
1595		return err;
1596	}
1597
1598	return 0;
1599}
1600
/*
 * mac80211 start path: reset the rings, bring the hardware up, enable
 * interrupts and mark the HAL as started.  Returns 0 on success or the
 * error from the chip-specific hw_init().
 */
static int rtl_pci_start(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));

	int err;

	rtl_pci_reset_trx_ring(hw);

	rtlpci->driver_is_goingto_unload = false;
	err = rtlpriv->cfg->ops->hw_init(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "Failed to config hardware!\n");
		return err;
	}

	rtlpriv->cfg->ops->enable_interrupt(hw);
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");

	rtl_init_rx_config(hw);

	/*should be after adapter start and interrupt enable. */
	set_hal_start(rtlhal);

	RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);

	rtlpci->up_first_time = false;

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
	return 0;
}
1635
/*
 * mac80211 stop path: mark the HAL stopped, disable interrupts, wait
 * for any in-flight RF state change to finish, then power the hardware
 * down.
 */
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u8 RFInProgressTimeOut = 0;

	/*
	 *should be before disable interrupt&adapter
	 *and will do it immediately.
	 */
	set_hal_stop(rtlhal);

	rtlpci->driver_is_goingto_unload = true;
	rtlpriv->cfg->ops->disable_interrupt(hw);
	cancel_work_sync(&rtlpriv->works.lps_change_work);

	/* wait (at most ~100 ms, polled under the rf_ps_lock) for a
	 * concurrent RF change to finish, then claim the flag ourselves
	 */
	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	while (ppsc->rfchange_inprogress) {
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
		if (RFInProgressTimeOut > 100) {
			spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
			break;
		}
		mdelay(1);
		RFInProgressTimeOut++;
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	}
	ppsc->rfchange_inprogress = true;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtlpriv->cfg->ops->hw_disable(hw);
	/* some things are not needed if firmware not available */
	/* NOTE(review): this early return leaves rfchange_inprogress set
	 * to true — confirm that is intended for the no-firmware case.
	 */
	if (!rtlpriv->max_fw_size)
		return;
	rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	ppsc->rfchange_inprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtl_pci_enable_aspm(hw);
}
1681
/*
 * Identify the Realtek chip behind @pdev from its PCI device/revision
 * IDs, record the bus topology (device + upstream bridge), and set up
 * the link-control/ASPM bookkeeping.
 *
 * Returns false when the device should be handled by another driver
 * (the RTL8192E variant of the 0x8192 device ID), true otherwise.
 */
static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
		struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct pci_dev *bridge_pdev = pdev->bus->self;
	u16 venderid;
	u16 deviceid;
	u8 revisionid;
	u16 irqline;
	u8 tmp;

	pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
	venderid = pdev->vendor;
	deviceid = pdev->device;
	/* 0x08 is the standard PCI revision-ID config register,
	 * 0x3C the interrupt-line register
	 */
	pci_read_config_byte(pdev, 0x8, &revisionid);
	pci_read_config_word(pdev, 0x3C, &irqline);

	/* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
	 * r8192e_pci, and RTL8192SE, which uses this driver. If the
	 * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
	 * the correct driver is r8192e_pci, thus this routine should
	 * return false.
	 */
	if (deviceid == RTL_PCI_8192SE_DID &&
	    revisionid == RTL_PCI_REVISION_ID_8192PCIE)
		return false;

	/* map device ID (disambiguated by revision where needed) to a
	 * hardware type
	 */
	if (deviceid == RTL_PCI_8192_DID ||
	    deviceid == RTL_PCI_0044_DID ||
	    deviceid == RTL_PCI_0047_DID ||
	    deviceid == RTL_PCI_8192SE_DID ||
	    deviceid == RTL_PCI_8174_DID ||
	    deviceid == RTL_PCI_8173_DID ||
	    deviceid == RTL_PCI_8172_DID ||
	    deviceid == RTL_PCI_8171_DID) {
		switch (revisionid) {
		case RTL_PCI_REVISION_ID_8192PCIE:
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 "8192 PCI-E is found - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
			return false;
		case RTL_PCI_REVISION_ID_8192SE:
			RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
				 "8192SE is found - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
				 "Err: Unknown device - vid/did=%x/%x\n",
				 venderid, deviceid);
			rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
			break;

		}
	} else if (deviceid == RTL_PCI_8723AE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8723AE PCI-E is found - "
			 "vid/did=%x/%x\n", venderid, deviceid);
	} else if (deviceid == RTL_PCI_8192CET_DID ||
		   deviceid == RTL_PCI_8192CE_DID ||
		   deviceid == RTL_PCI_8191CE_DID ||
		   deviceid == RTL_PCI_8188CE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8192C PCI-E is found - vid/did=%x/%x\n",
			 venderid, deviceid);
	} else if (deviceid == RTL_PCI_8192DE_DID ||
		   deviceid == RTL_PCI_8192DE_DID2) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "8192D PCI-E is found - vid/did=%x/%x\n",
			 venderid, deviceid);
	} else if (deviceid == RTL_PCI_8188EE_DID) {
		rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
			 "Find adapter, Hardware type is 8188EE\n");
	} else {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 "Err: Unknown device - vid/did=%x/%x\n",
			 venderid, deviceid);

		rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
	}

	/* the 92DE is a dual-MAC part; the revision ID selects the MAC */
	if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
		if (revisionid == 0 || revisionid == 1) {
			if (revisionid == 0) {
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "Find 92DE MAC0\n");
				rtlhal->interfaceindex = 0;
			} else if (revisionid == 1) {
				RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
					 "Find 92DE MAC1\n");
				rtlhal->interfaceindex = 1;
			}
		} else {
			RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
				 "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
				 venderid, deviceid, revisionid);
			rtlhal->interfaceindex = 0;
		}
	}
	/*find bus info */
	pcipriv->ndis_adapter.busnumber = pdev->bus->number;
	pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
	pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);

	/* some ARM have no bridge_pdev and will crash here
	 * so we should check if bridge_pdev is NULL
	 */
	if (bridge_pdev) {
		/*find bridge info if available */
		pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
		for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
			if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
				pcipriv->ndis_adapter.pcibridge_vendor = tmp;
				RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
					 "Pci Bridge Vendor is found index: %d\n",
					 tmp);
				break;
			}
		}
	}

	/* bridge recognized: record its topology and PCIe capability
	 * offset for the ASPM/link-control code
	 */
	if (pcipriv->ndis_adapter.pcibridge_vendor !=
		PCI_BRIDGE_VENDOR_UNKNOWN) {
		pcipriv->ndis_adapter.pcibridge_busnum =
		    bridge_pdev->bus->number;
		pcipriv->ndis_adapter.pcibridge_devnum =
		    PCI_SLOT(bridge_pdev->devfn);
		pcipriv->ndis_adapter.pcibridge_funcnum =
		    PCI_FUNC(bridge_pdev->devfn);
		pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
		    pci_pcie_cap(bridge_pdev);
		pcipriv->ndis_adapter.num4bytes =
		    (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;

		rtl_pci_get_linkcontrol_field(hw);

		if (pcipriv->ndis_adapter.pcibridge_vendor ==
		    PCI_BRIDGE_VENDOR_AMD) {
			pcipriv->ndis_adapter.amd_l1_patch =
			    rtl_pci_get_amd_l1_patch(hw);
		}
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
		 pcipriv->ndis_adapter.busnumber,
		 pcipriv->ndis_adapter.devnumber,
		 pcipriv->ndis_adapter.funcnumber,
		 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
		 pcipriv->ndis_adapter.pcibridge_busnum,
		 pcipriv->ndis_adapter.pcibridge_devnum,
		 pcipriv->ndis_adapter.pcibridge_funcnum,
		 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
		 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
		 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
		 pcipriv->ndis_adapter.amd_l1_patch);

	rtl_pci_parse_configuration(pdev, hw);
	list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);

	return true;
}
1855
1856static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
1857{
1858	struct rtl_priv *rtlpriv = rtl_priv(hw);
1859	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1860	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1861	int ret;
1862
1863	ret = pci_enable_msi(rtlpci->pdev);
1864	if (ret < 0)
1865		return ret;
1866
1867	ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1868			  IRQF_SHARED, KBUILD_MODNAME, hw);
1869	if (ret < 0) {
1870		pci_disable_msi(rtlpci->pdev);
1871		return ret;
1872	}
1873
1874	rtlpci->using_msi = true;
1875
1876	RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
1877		 "MSI Interrupt Mode!\n");
1878	return 0;
1879}
1880
1881static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
1882{
1883	struct rtl_priv *rtlpriv = rtl_priv(hw);
1884	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1885	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1886	int ret;
1887
1888	ret = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1889			  IRQF_SHARED, KBUILD_MODNAME, hw);
1890	if (ret < 0)
1891		return ret;
1892
1893	rtlpci->using_msi = false;
1894	RT_TRACE(rtlpriv, COMP_INIT|COMP_INTR, DBG_DMESG,
1895		 "Pin-based Interrupt Mode!\n");
1896	return 0;
1897}
1898
1899static int rtl_pci_intr_mode_decide(struct ieee80211_hw *hw)
1900{
1901	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1902	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1903	int ret;
1904
1905	if (rtlpci->msi_support) {
1906		ret = rtl_pci_intr_mode_msi(hw);
1907		if (ret < 0)
1908			ret = rtl_pci_intr_mode_legacy(hw);
1909	} else {
1910		ret = rtl_pci_intr_mode_legacy(hw);
1911	}
1912	return ret;
1913}
1914
/*
 * PCI probe: enable the device, map its registers, identify the chip,
 * initialize the mac80211/PCI software layers and install the IRQ
 * handler.  Returns 0 on success or a negative errno; on failure the
 * fail1/fail2/fail3 labels unwind in reverse order of acquisition.
 */
int rtl_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct ieee80211_hw *hw = NULL;

	struct rtl_priv *rtlpriv = NULL;
	struct rtl_pci_priv *pcipriv = NULL;
	struct rtl_pci *rtlpci;
	unsigned long pmem_start, pmem_len, pmem_flags;
	int err;

	err = pci_enable_device(pdev);
	if (err) {
		RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
			  pci_name(pdev));
		return err;
	}

	/* the hardware needs 32-bit DMA addressing */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			RT_ASSERT(false,
				  "Unable to obtain 32bit DMA for consistent allocations\n");
			err = -ENOMEM;
			goto fail1;
		}
	}

	pci_set_master(pdev);

	hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
				sizeof(struct rtl_priv), &rtl_ops);
	if (!hw) {
		RT_ASSERT(false,
			  "%s : ieee80211 alloc failed\n", pci_name(pdev));
		err = -ENOMEM;
		goto fail1;
	}

	SET_IEEE80211_DEV(hw, &pdev->dev);
	pci_set_drvdata(pdev, hw);

	rtlpriv = hw->priv;
	rtlpriv->hw = hw;
	pcipriv = (void *)rtlpriv->priv;
	pcipriv->dev.pdev = pdev;
	init_completion(&rtlpriv->firmware_loading_complete);

	/* init cfg & intf_ops */
	rtlpriv->rtlhal.interface = INTF_PCI;
	rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
	rtlpriv->intf_ops = &rtl_pci_ops;
	rtlpriv->glb_var = &rtl_global_var;

	/*
	 *init dbgp flags before all
	 *other functions, because we will
	 *use it in other funtions like
	 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
	 *you can not use these macro
	 *before this
	 */
	rtl_dbgp_flag_init(hw);

	/* MEM map */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		RT_ASSERT(false, "Can't obtain PCI resources\n");
		goto fail1;
	}

	pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
	pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
	pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);

	/*shared mem start */
	rtlpriv->io.pci_mem_start =
			(unsigned long)pci_iomap(pdev,
			rtlpriv->cfg->bar_id, pmem_len);
	if (rtlpriv->io.pci_mem_start == 0) {
		RT_ASSERT(false, "Can't map PCI mem\n");
		err = -ENOMEM;
		goto fail2;
	}

	RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
		 "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
		 pmem_start, pmem_len, pmem_flags,
		 rtlpriv->io.pci_mem_start);

	/* Disable Clk Request */
	pci_write_config_byte(pdev, 0x81, 0);
	/* leave D3 mode */
	pci_write_config_byte(pdev, 0x44, 0);
	pci_write_config_byte(pdev, 0x04, 0x06);
	pci_write_config_byte(pdev, 0x04, 0x07);

	/* find adapter */
	if (!_rtl_pci_find_adapter(pdev, hw)) {
		err = -ENODEV;
		goto fail3;
	}

	/* Init IO handler */
	_rtl_pci_io_handler_init(&pdev->dev, hw);

	/*like read eeprom and so on */
	rtlpriv->cfg->ops->read_eeprom_info(hw);

	/*aspm */
	rtl_pci_init_aspm(hw);

	/* Init mac80211 sw */
	err = rtl_init_core(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "Can't allocate sw for mac80211\n");
		goto fail3;
	}

	/* Init PCI sw */
	err = rtl_pci_init(hw, pdev);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
		goto fail3;
	}

	if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
		err = -ENODEV;
		goto fail3;
	}

	rtlpriv->cfg->ops->init_sw_leds(hw);

	err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
	if (err) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
			 "failed to create sysfs device attributes\n");
		goto fail3;
	}

	rtlpci = rtl_pcidev(pcipriv);
	err = rtl_pci_intr_mode_decide(hw);
	if (err) {
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 "%s: failed to register IRQ handler\n",
			 wiphy_name(hw->wiphy));
		goto fail3;
	}
	rtlpci->irq_alloc = 1;

	return 0;

	/* NOTE(review): fail3 does not undo rtl_pci_init() or the sysfs
	 * group for the later failures — confirm whether rtl_deinit_core()
	 * is expected to cover that.
	 */
fail3:
	rtl_deinit_core(hw);

	if (rtlpriv->io.pci_mem_start != 0)
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);

fail2:
	pci_release_regions(pdev);
	complete(&rtlpriv->firmware_loading_complete);

fail1:
	if (hw)
		ieee80211_free_hw(hw);
	pci_disable_device(pdev);

	return err;

}
2086EXPORT_SYMBOL(rtl_pci_probe);
2087
/* Driver removal callback: undo everything rtl_pci_probe() set up.
 * Teardown order matters here: mac80211 unregistration (which stops
 * the hardware) happens before driver state is freed, and the IRQ is
 * released before the MMIO mapping and PCI resources are dropped.
 */
void rtl_pci_disconnect(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
	struct rtl_mac *rtlmac = rtl_mac(rtlpriv);

	/* just in case driver is removed before firmware callback */
	wait_for_completion(&rtlpriv->firmware_loading_complete);
	clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);

	/* remove the sysfs attribute group registered in probe */
	sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);

	/*ieee80211_unregister_hw will call ops_stop */
	if (rtlmac->mac80211_registered == 1) {
		ieee80211_unregister_hw(hw);
		rtlmac->mac80211_registered = 0;
	} else {
		/* hw never made it to mac80211 registration (e.g. firmware
		 * load failed); stop the adapter directly instead.
		 */
		rtl_deinit_deferred_work(hw);
		rtlpriv->intf_ops->adapter_stop(hw);
	}
	rtlpriv->cfg->ops->disable_interrupt(hw);

	/*deinit rfkill */
	rtl_deinit_rfkill(hw);

	rtl_pci_deinit(hw);
	rtl_deinit_core(hw);
	rtlpriv->cfg->ops->deinit_sw_vars(hw);

	/* wait for any in-flight interrupt handler, then release the IRQ */
	if (rtlpci->irq_alloc) {
		synchronize_irq(rtlpci->pdev->irq);
		free_irq(rtlpci->pdev->irq, hw);
		rtlpci->irq_alloc = 0;
	}

	/* using_msi presumably set when MSI was chosen during interrupt
	 * setup in probe — TODO confirm against rtl_pci_intr_mode_decide()
	 */
	if (rtlpci->using_msi)
		pci_disable_msi(rtlpci->pdev);

	/* drop this device from the driver's device list */
	list_del(&rtlpriv->list);
	/* unmap MMIO and release the PCI regions claimed in probe */
	if (rtlpriv->io.pci_mem_start != 0) {
		pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
		pci_release_regions(pdev);
	}

	pci_disable_device(pdev);

	rtl_pci_disable_aspm(hw);

	ieee80211_free_hw(hw);
}
2140EXPORT_SYMBOL(rtl_pci_disconnect);
2141
2142#ifdef CONFIG_PM_SLEEP
2143/***************************************
2144kernel pci power state define:
2145PCI_D0         ((pci_power_t __force) 0)
2146PCI_D1         ((pci_power_t __force) 1)
2147PCI_D2         ((pci_power_t __force) 2)
2148PCI_D3hot      ((pci_power_t __force) 3)
2149PCI_D3cold     ((pci_power_t __force) 4)
2150PCI_UNKNOWN    ((pci_power_t __force) 5)
2151
2152This function is called when system
2153goes into suspend state mac80211 will
2154call rtl_mac_stop() from the mac80211
2155suspend function first, So there is
2156no need to call hw_disable here.
2157****************************************/
2158int rtl_pci_suspend(struct device *dev)
2159{
2160	struct pci_dev *pdev = to_pci_dev(dev);
2161	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2162	struct rtl_priv *rtlpriv = rtl_priv(hw);
2163
2164	rtlpriv->cfg->ops->hw_suspend(hw);
2165	rtl_deinit_rfkill(hw);
2166
2167	return 0;
2168}
2169EXPORT_SYMBOL(rtl_pci_suspend);
2170
2171int rtl_pci_resume(struct device *dev)
2172{
2173	struct pci_dev *pdev = to_pci_dev(dev);
2174	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2175	struct rtl_priv *rtlpriv = rtl_priv(hw);
2176
2177	rtlpriv->cfg->ops->hw_resume(hw);
2178	rtl_init_rfkill(hw);
2179	return 0;
2180}
2181EXPORT_SYMBOL(rtl_pci_resume);
2182#endif /* CONFIG_PM_SLEEP */
2183
/* Bus interface operations shared by all rtlwifi PCI chip drivers.
 * Each entry maps a generic rtl_intf_ops callback to its PCI
 * implementation in this file (or, for read_efuse_byte, to efuse.h).
 */
struct rtl_intf_ops rtl_pci_ops = {
	.read_efuse_byte = read_efuse_byte,
	.adapter_start = rtl_pci_start,
	.adapter_stop = rtl_pci_stop,
	.check_buddy_priv = rtl_pci_check_buddy_priv,
	.adapter_tx = rtl_pci_tx,
	.flush = rtl_pci_flush,
	.reset_trx_ring = rtl_pci_reset_trx_ring,
	.waitq_insert = rtl_pci_tx_chk_waitq_insert,

	/* ASPM control, also used directly by the probe/disconnect paths */
	.disable_aspm = rtl_pci_disable_aspm,
	.enable_aspm = rtl_pci_enable_aspm,
};