   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This file is part of wlcore
   4 *
   5 * Copyright (C) 2008-2010 Nokia Corporation
   6 * Copyright (C) 2011-2013 Texas Instruments Inc.
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/firmware.h>
  11#include <linux/etherdevice.h>
  12#include <linux/vmalloc.h>
  13#include <linux/interrupt.h>
  14#include <linux/irq.h>
  15#include <linux/pm_runtime.h>
  16#include <linux/pm_wakeirq.h>
  17
  18#include "wlcore.h"
  19#include "debug.h"
  20#include "wl12xx_80211.h"
  21#include "io.h"
  22#include "tx.h"
  23#include "ps.h"
  24#include "init.h"
  25#include "debugfs.h"
  26#include "testmode.h"
  27#include "vendor_cmd.h"
  28#include "scan.h"
  29#include "hw_ops.h"
  30#include "sysfs.h"
  31
   32#define WL1271_BOOT_RETRIES 3
  33#define WL1271_WAKEUP_TIMEOUT 500
  34
  35static char *fwlog_param;
  36static int fwlog_mem_blocks = -1;
  37static int bug_on_recovery = -1;
  38static int no_recovery     = -1;
  39
  40static void __wl1271_op_remove_interface(struct wl1271 *wl,
  41					 struct ieee80211_vif *vif,
  42					 bool reset_tx_queues);
  43static void wlcore_op_stop_locked(struct wl1271 *wl);
  44static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
  45
  46static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  47{
  48	int ret;
  49
  50	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
  51		return -EINVAL;
  52
  53	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
  54		return 0;
  55
  56	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
  57		return 0;
  58
  59	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
  60	if (ret < 0)
  61		return ret;
  62
  63	wl1271_info("Association completed.");
  64	return 0;
  65}
  66
  67static void wl1271_reg_notify(struct wiphy *wiphy,
  68			      struct regulatory_request *request)
  69{
  70	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
  71	struct wl1271 *wl = hw->priv;
  72
  73	/* copy the current dfs region */
  74	if (request)
  75		wl->dfs_region = request->dfs_region;
  76
  77	wlcore_regdomain_config(wl);
  78}
  79
  80static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  81				   bool enable)
  82{
  83	int ret = 0;
  84
  85	/* we should hold wl->mutex */
  86	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
  87	if (ret < 0)
  88		goto out;
  89
  90	if (enable)
  91		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
  92	else
  93		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
  94out:
  95	return ret;
  96}
  97
  98/*
  99 * this function is called when the rx_streaming interval
 100 * has been changed or rx_streaming should be disabled
 101 */
 102int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 103{
 104	int ret = 0;
 105	int period = wl->conf.rx_streaming.interval;
 106
 107	/* don't reconfigure if rx_streaming is disabled */
 108	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
 109		goto out;
 110
 111	/* reconfigure/disable according to new streaming_period */
 112	if (period &&
 113	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
 114	    (wl->conf.rx_streaming.always ||
 115	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
 116		ret = wl1271_set_rx_streaming(wl, wlvif, true);
 117	else {
 118		ret = wl1271_set_rx_streaming(wl, wlvif, false);
 119		/* don't cancel_work_sync since we might deadlock */
 120		del_timer_sync(&wlvif->rx_streaming_timer);
 121	}
 122out:
 123	return ret;
 124}
 125
 126static void wl1271_rx_streaming_enable_work(struct work_struct *work)
 127{
 128	int ret;
 129	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
 130						rx_streaming_enable_work);
 131	struct wl1271 *wl = wlvif->wl;
 132
 133	mutex_lock(&wl->mutex);
 134
 135	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
 136	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
 137	    (!wl->conf.rx_streaming.always &&
 138	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
 139		goto out;
 140
 141	if (!wl->conf.rx_streaming.interval)
 142		goto out;
 143
 144	ret = pm_runtime_resume_and_get(wl->dev);
  145	if (ret < 0)
  146		goto out;
 147
 148	ret = wl1271_set_rx_streaming(wl, wlvif, true);
 149	if (ret < 0)
 150		goto out_sleep;
 151
 152	/* stop it after some time of inactivity */
 153	mod_timer(&wlvif->rx_streaming_timer,
 154		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
 155
 156out_sleep:
 157	pm_runtime_mark_last_busy(wl->dev);
 158	pm_runtime_put_autosuspend(wl->dev);
 159out:
 160	mutex_unlock(&wl->mutex);
 161}
 162
 163static void wl1271_rx_streaming_disable_work(struct work_struct *work)
 164{
 165	int ret;
 166	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
 167						rx_streaming_disable_work);
 168	struct wl1271 *wl = wlvif->wl;
 169
 170	mutex_lock(&wl->mutex);
 171
 172	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
 173		goto out;
 174
 175	ret = pm_runtime_resume_and_get(wl->dev);
  176	if (ret < 0)
  177		goto out;
 178
 179	ret = wl1271_set_rx_streaming(wl, wlvif, false);
 180	if (ret)
 181		goto out_sleep;
 182
 183out_sleep:
 184	pm_runtime_mark_last_busy(wl->dev);
 185	pm_runtime_put_autosuspend(wl->dev);
 186out:
 187	mutex_unlock(&wl->mutex);
 188}
 189
 190static void wl1271_rx_streaming_timer(struct timer_list *t)
 191{
 192	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
 193	struct wl1271 *wl = wlvif->wl;
 194	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
 195}
 196
 197/* wl->mutex must be taken */
 198void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
 199{
 200	/* if the watchdog is not armed, don't do anything */
 201	if (wl->tx_allocated_blocks == 0)
 202		return;
 203
 204	cancel_delayed_work(&wl->tx_watchdog_work);
 205	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
 206		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
 207}
 208
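/*
 * Rate-control update worker: wakes the chip and either re-applies the HT
 * capabilities for a mesh peer or hands the station rate-class update to the
 * chip-specific ops.
 */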
 209static void wlcore_rc_update_work(struct work_struct *work)
 210{
 211	int ret;
 212	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
 213						rc_update_work);
 214	struct wl1271 *wl = wlvif->wl;
 215	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 216
 217	mutex_lock(&wl->mutex);
 218
 219	if (unlikely(wl->state != WLCORE_STATE_ON))
 220		goto out;
 221
 222	ret = pm_runtime_resume_and_get(wl->dev);
  223	if (ret < 0)
  224		goto out;
 225
 226	if (ieee80211_vif_is_mesh(vif)) {
 227		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
 228						     true, wlvif->sta.hlid);
 229		if (ret < 0)
 230			goto out_sleep;
 231	} else {
 232		wlcore_hw_sta_rc_update(wl, wlvif);
 233	}
 234
 235out_sleep:
 236	pm_runtime_mark_last_busy(wl->dev);
 237	pm_runtime_put_autosuspend(wl->dev);
 238out:
 239	mutex_unlock(&wl->mutex);
 240}
 241
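/*
 * TX watchdog worker: when no TX blocks were freed within the timeout, rearm
 * the watchdog if a ROC, a scan or sleeping AP stations can explain the
 * stall; otherwise declare the FW stuck and queue recovery.
 */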
 242static void wl12xx_tx_watchdog_work(struct work_struct *work)
 243{
 244	struct delayed_work *dwork;
 245	struct wl1271 *wl;
 246
 247	dwork = to_delayed_work(work);
 248	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
 249
 250	mutex_lock(&wl->mutex);
 251
 252	if (unlikely(wl->state != WLCORE_STATE_ON))
 253		goto out;
 254
 255	/* Tx went out in the meantime - everything is ok */
 256	if (unlikely(wl->tx_allocated_blocks == 0))
 257		goto out;
 258
 259	/*
 260	 * if a ROC is in progress, we might not have any Tx for a long
 261	 * time (e.g. pending Tx on the non-ROC channels)
 262	 */
 263	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
 264		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
 265			     wl->conf.tx.tx_watchdog_timeout);
 266		wl12xx_rearm_tx_watchdog_locked(wl);
 267		goto out;
 268	}
 269
 270	/*
 271	 * if a scan is in progress, we might not have any Tx for a long
 272	 * time
 273	 */
 274	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
 275		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
 276			     wl->conf.tx.tx_watchdog_timeout);
 277		wl12xx_rearm_tx_watchdog_locked(wl);
 278		goto out;
 279	}
 280
 281	/*
 282	* AP might cache a frame for a long time for a sleeping station,
 283	* so rearm the timer if there's an AP interface with stations. If
 284	* Tx is genuinely stuck we will hopefully discover it when all
 285	* stations are removed due to inactivity.
 286	*/
 287	if (wl->active_sta_count) {
 288		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
 289			     "%d stations",
 290			      wl->conf.tx.tx_watchdog_timeout,
 291			      wl->active_sta_count);
 292		wl12xx_rearm_tx_watchdog_locked(wl);
 293		goto out;
 294	}
 295
 296	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
 297		     wl->conf.tx.tx_watchdog_timeout);
 298	wl12xx_queue_recovery_work(wl);
 299
 300out:
 301	mutex_unlock(&wl->mutex);
 302}
 303
 304static void wlcore_adjust_conf(struct wl1271 *wl)
 305{
 306
 307	if (fwlog_param) {
 308		if (!strcmp(fwlog_param, "continuous")) {
 309			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
 310			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
 311		} else if (!strcmp(fwlog_param, "dbgpins")) {
 312			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
 313			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
 314		} else if (!strcmp(fwlog_param, "disable")) {
 315			wl->conf.fwlog.mem_blocks = 0;
 316			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
 317		} else {
 318			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
 319		}
 320	}
 321
 322	if (bug_on_recovery != -1)
 323		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
 324
 325	if (no_recovery != -1)
 326		wl->conf.recovery.no_recovery = (u8) no_recovery;
 327}
 328
 329static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
 330					struct wl12xx_vif *wlvif,
 331					u8 hlid, u8 tx_pkts)
 332{
 333	bool fw_ps;
 334
 335	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
 336
 337	/*
 338	 * Wake up from high-level PS if the STA is asleep with too few
 339	 * packets in FW or if the STA is awake.
 340	 */
 341	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
 342		wl12xx_ps_link_end(wl, wlvif, hlid);
 343
 344	/*
 345	 * Start high-level PS if the STA is asleep with enough blocks in FW.
 346	 * Make an exception if this is the only connected link. In this
 347	 * case FW-memory congestion is less of a problem.
 348	 * Note that a single connected STA means 2*ap_count + 1 active links,
 349	 * since we must account for the global and broadcast AP links
 350	 * for each AP. The "fw_ps" check assures us the other link is a STA
 351	 * connected to the AP. Otherwise the FW would not set the PSM bit.
 352	 */
 353	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
 354		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
 355		wl12xx_ps_link_start(wl, wlvif, hlid, true);
 356}
 357
 358static void wl12xx_irq_update_links_status(struct wl1271 *wl,
 359					   struct wl12xx_vif *wlvif,
 360					   struct wl_fw_status *status)
 361{
 362	unsigned long cur_fw_ps_map;
 363	u8 hlid;
 364
 365	cur_fw_ps_map = status->link_ps_bitmap;
 366	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
 367		wl1271_debug(DEBUG_PSM,
 368			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
 369			     wl->ap_fw_ps_map, cur_fw_ps_map,
 370			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
 371
 372		wl->ap_fw_ps_map = cur_fw_ps_map;
 373	}
 374
 375	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
 376		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
 377					    wl->links[hlid].allocated_pkts);
 378}
 379
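/*
 * Read the raw FW status block and fold it into driver state: per-queue and
 * per-link freed-packet counters, the available TX block count, the TX
 * watchdog, AP link PS state and the host-chipset time offset.
 */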
 380static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 381{
 382	struct wl12xx_vif *wlvif;
 383	u32 old_tx_blk_count = wl->tx_blocks_available;
 384	int avail, freed_blocks;
 385	int i;
 386	int ret;
 387	struct wl1271_link *lnk;
 388
 389	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
 390				   wl->raw_fw_status,
 391				   wl->fw_status_len, false);
 392	if (ret < 0)
 393		return ret;
 394
 395	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
 396
 397	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
 398		     "drv_rx_counter = %d, tx_results_counter = %d)",
 399		     status->intr,
 400		     status->fw_rx_counter,
 401		     status->drv_rx_counter,
 402		     status->tx_results_counter);
 403
 404	for (i = 0; i < NUM_TX_QUEUES; i++) {
 405		/* prevent wrap-around in freed-packets counter */
 406		wl->tx_allocated_pkts[i] -=
 407				(status->counters.tx_released_pkts[i] -
 408				wl->tx_pkts_freed[i]) & 0xff;
 409
 410		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
 411	}
 412
 413
 414	for_each_set_bit(i, wl->links_map, wl->num_links) {
 415		u8 diff;
 416		lnk = &wl->links[i];
 417
 418		/* prevent wrap-around in freed-packets counter */
 419		diff = (status->counters.tx_lnk_free_pkts[i] -
 420		       lnk->prev_freed_pkts) & 0xff;
 421
 422		if (diff == 0)
 423			continue;
 424
 425		lnk->allocated_pkts -= diff;
 426		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
 427
 428		/* accumulate the prev_freed_pkts counter */
 429		lnk->total_freed_pkts += diff;
 430	}
 431
 432	/* prevent wrap-around in total blocks counter */
 433	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
 434		freed_blocks = status->total_released_blks -
 435			       wl->tx_blocks_freed;
 436	else
 437		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
 438			       status->total_released_blks;
 439
 440	wl->tx_blocks_freed = status->total_released_blks;
 441
 442	wl->tx_allocated_blocks -= freed_blocks;
 443
 444	/*
 445	 * If the FW freed some blocks:
 446	 * If we still have allocated blocks - re-arm the timer, Tx is
 447	 * not stuck. Otherwise, cancel the timer (no Tx currently).
 448	 */
 449	if (freed_blocks) {
 450		if (wl->tx_allocated_blocks)
 451			wl12xx_rearm_tx_watchdog_locked(wl);
 452		else
 453			cancel_delayed_work(&wl->tx_watchdog_work);
 454	}
 455
 456	avail = status->tx_total - wl->tx_allocated_blocks;
 457
 458	/*
 459	 * The FW might change the total number of TX memblocks before
 460	 * we get a notification about blocks being released. Thus, the
 461	 * available blocks calculation might yield a temporary result
 462	 * which is lower than the actual available blocks. Keeping in
 463	 * mind that only blocks that were allocated can be moved from
 464	 * TX to RX, tx_blocks_available should never decrease here.
 465	 */
 466	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
 467				      avail);
 468
 469	/* if more blocks are available now, tx work can be scheduled */
 470	if (wl->tx_blocks_available > old_tx_blk_count)
 471		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 472
 473	/* for AP update num of allocated TX blocks per link and ps status */
 474	wl12xx_for_each_wlvif_ap(wl, wlvif) {
 475		wl12xx_irq_update_links_status(wl, wlvif, status);
 476	}
 477
 478	/* update the host-chipset time offset */
 479	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
 480		(s64)(status->fw_localtime);
 481
 482	wl->fw_fast_lnk_map = status->link_fast_bitmap;
 483
 484	return 0;
 485}
 486
 487static void wl1271_flush_deferred_work(struct wl1271 *wl)
 488{
 489	struct sk_buff *skb;
 490
 491	/* Pass all received frames to the network stack */
 492	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
 493		ieee80211_rx_ni(wl->hw, skb);
 494
 495	/* Return sent skbs to the network stack */
 496	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
 497		ieee80211_tx_status_ni(wl->hw, skb);
 498}
 499
 500static void wl1271_netstack_work(struct work_struct *work)
 501{
 502	struct wl1271 *wl =
 503		container_of(work, struct wl1271, netstack_work);
 504
 505	do {
 506		wl1271_flush_deferred_work(wl);
 507	} while (skb_queue_len(&wl->deferred_rx_queue));
 508}
 509
 510#define WL1271_IRQ_MAX_LOOPS 256
 511
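/*
 * Main interrupt processing loop, called with wl->mutex held: fetches the FW
 * status, handles RX, immediate/delayed TX completions and event mailboxes,
 * and aborts with -EIO to trigger recovery on a HW/SW watchdog interrupt.
 */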
 512static int wlcore_irq_locked(struct wl1271 *wl)
 513{
 514	int ret = 0;
 515	u32 intr;
 516	int loopcount = WL1271_IRQ_MAX_LOOPS;
 517	bool run_tx_queue = true;
 518	bool done = false;
 519	unsigned int defer_count;
 520	unsigned long flags;
 521
 522	/*
 523	 * If an edge-triggered interrupt must be used, we cannot iterate
 524	 * more than once without introducing race conditions with the hardirq.
 525	 */
 526	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 527		loopcount = 1;
 528
 529	wl1271_debug(DEBUG_IRQ, "IRQ work");
 530
 531	if (unlikely(wl->state != WLCORE_STATE_ON))
 532		goto out;
 533
 534	ret = pm_runtime_resume_and_get(wl->dev);
  535	if (ret < 0)
  536		goto out;
 537
 538	while (!done && loopcount--) {
 539		smp_mb__after_atomic();
 540
 541		ret = wlcore_fw_status(wl, wl->fw_status);
 542		if (ret < 0)
 543			goto err_ret;
 544
 545		wlcore_hw_tx_immediate_compl(wl);
 546
 547		intr = wl->fw_status->intr;
 548		intr &= WLCORE_ALL_INTR_MASK;
 549		if (!intr) {
 550			done = true;
 551			continue;
 552		}
 553
 554		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
 555			wl1271_error("HW watchdog interrupt received! starting recovery.");
 556			wl->watchdog_recovery = true;
 557			ret = -EIO;
 558
 559			/* restarting the chip. ignore any other interrupt. */
 560			goto err_ret;
 561		}
 562
 563		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
 564			wl1271_error("SW watchdog interrupt received! "
 565				     "starting recovery.");
 566			wl->watchdog_recovery = true;
 567			ret = -EIO;
 568
 569			/* restarting the chip. ignore any other interrupt. */
 570			goto err_ret;
 571		}
 572
 573		if (likely(intr & WL1271_ACX_INTR_DATA)) {
 574			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 575
 576			ret = wlcore_rx(wl, wl->fw_status);
 577			if (ret < 0)
 578				goto err_ret;
 579
 580			/* Check if any tx blocks were freed */
 581			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
 582				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
 583					if (!wl1271_tx_total_queue_count(wl))
 584						run_tx_queue = false;
 585					spin_unlock_irqrestore(&wl->wl_lock, flags);
 586				}
 587
 588				/*
 589				 * In order to avoid starvation of the TX path,
 590				 * call the work function directly.
 591				 */
 592				if (run_tx_queue) {
 593					ret = wlcore_tx_work_locked(wl);
 594					if (ret < 0)
 595						goto err_ret;
 596				}
 597			}
 598
 599			/* check for tx results */
 600			ret = wlcore_hw_tx_delayed_compl(wl);
 601			if (ret < 0)
 602				goto err_ret;
 603
 604			/* Make sure the deferred queues don't get too long */
 605			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
 606				      skb_queue_len(&wl->deferred_rx_queue);
 607			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
 608				wl1271_flush_deferred_work(wl);
 609		}
 610
 611		if (intr & WL1271_ACX_INTR_EVENT_A) {
 612			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
 613			ret = wl1271_event_handle(wl, 0);
 614			if (ret < 0)
 615				goto err_ret;
 616		}
 617
 618		if (intr & WL1271_ACX_INTR_EVENT_B) {
 619			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
 620			ret = wl1271_event_handle(wl, 1);
 621			if (ret < 0)
 622				goto err_ret;
 623		}
 624
 625		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
 626			wl1271_debug(DEBUG_IRQ,
 627				     "WL1271_ACX_INTR_INIT_COMPLETE");
 628
 629		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
 630			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
 631	}
 632
 633err_ret:
 634	pm_runtime_mark_last_busy(wl->dev);
 635	pm_runtime_put_autosuspend(wl->dev);
 636
 637out:
 638	return ret;
 639}
 640
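/*
 * Threaded IRQ handler: completes a pending ELP wakeup, defers processing
 * while suspended (waking the device instead), and otherwise runs
 * wlcore_irq_locked() under wl->mutex, queueing recovery on error and TX work
 * if frames are still pending.
 */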
 641static irqreturn_t wlcore_irq(int irq, void *cookie)
 642{
 643	int ret;
 644	unsigned long flags;
 645	struct wl1271 *wl = cookie;
 646	bool queue_tx_work = true;
 647
 648	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
 649
 650	/* complete the ELP completion */
 651	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
 652		spin_lock_irqsave(&wl->wl_lock, flags);
 653		if (wl->elp_compl)
 654			complete(wl->elp_compl);
 655		spin_unlock_irqrestore(&wl->wl_lock, flags);
 656	}
 657
 658	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
 659		/* don't enqueue a work right now. mark it as pending */
 660		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
 661		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
 662		spin_lock_irqsave(&wl->wl_lock, flags);
 663		disable_irq_nosync(wl->irq);
 664		pm_wakeup_event(wl->dev, 0);
 665		spin_unlock_irqrestore(&wl->wl_lock, flags);
 666		goto out_handled;
 667	}
 668
 669	/* TX might be handled here, avoid redundant work */
 670	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
 671	cancel_work_sync(&wl->tx_work);
 672
 673	mutex_lock(&wl->mutex);
 674
 675	ret = wlcore_irq_locked(wl);
 676	if (ret)
 677		wl12xx_queue_recovery_work(wl);
 678
 679	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
 680	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
 681	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
 682		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
 683			if (!wl1271_tx_total_queue_count(wl))
 684				queue_tx_work = false;
 685			spin_unlock_irqrestore(&wl->wl_lock, flags);
 686		}
 687		if (queue_tx_work)
 688			ieee80211_queue_work(wl->hw, &wl->tx_work);
 689	}
 690
 691	mutex_unlock(&wl->mutex);
 692
 693out_handled:
 694	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
 695
 696	return IRQ_HANDLED;
 697}
 698
 699struct vif_counter_data {
 700	u8 counter;
 701
 702	struct ieee80211_vif *cur_vif;
 703	bool cur_vif_running;
 704};
 705
 706static void wl12xx_vif_count_iter(void *data, u8 *mac,
 707				  struct ieee80211_vif *vif)
 708{
 709	struct vif_counter_data *counter = data;
 710
 711	counter->counter++;
 712	if (counter->cur_vif == vif)
 713		counter->cur_vif_running = true;
 714}
 715
 716/* caller must not hold wl->mutex, as it might deadlock */
 717static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
 718			       struct ieee80211_vif *cur_vif,
 719			       struct vif_counter_data *data)
 720{
 721	memset(data, 0, sizeof(*data));
 722	data->cur_vif = cur_vif;
 723
 724	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 725					    wl12xx_vif_count_iter, data);
 726}
 727
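/*
 * Pick the PLT, single-role or multi-role firmware image (based on the cached
 * vif count), request it from userspace and copy it into wl->fw, skipping the
 * load when the right image type is already cached.
 */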
 728static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
 729{
 730	const struct firmware *fw;
 731	const char *fw_name;
 732	enum wl12xx_fw_type fw_type;
 733	int ret;
 734
 735	if (plt) {
 736		fw_type = WL12XX_FW_TYPE_PLT;
 737		fw_name = wl->plt_fw_name;
 738	} else {
 739		/*
 740		 * we can't call wl12xx_get_vif_count() here because
 741		 * wl->mutex is taken, so use the cached last_vif_count value
 742		 */
 743		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
 744			fw_type = WL12XX_FW_TYPE_MULTI;
 745			fw_name = wl->mr_fw_name;
 746		} else {
 747			fw_type = WL12XX_FW_TYPE_NORMAL;
 748			fw_name = wl->sr_fw_name;
 749		}
 750	}
 751
 752	if (wl->fw_type == fw_type)
 753		return 0;
 754
 755	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
 756
 757	ret = request_firmware(&fw, fw_name, wl->dev);
 758
 759	if (ret < 0) {
 760		wl1271_error("could not get firmware %s: %d", fw_name, ret);
 761		return ret;
 762	}
 763
 764	if (fw->size % 4) {
 765		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
 766			     fw->size);
 767		ret = -EILSEQ;
 768		goto out;
 769	}
 770
 771	vfree(wl->fw);
 772	wl->fw_type = WL12XX_FW_TYPE_NONE;
 773	wl->fw_len = fw->size;
 774	wl->fw = vmalloc(wl->fw_len);
 775
 776	if (!wl->fw) {
 777		wl1271_error("could not allocate memory for the firmware");
 778		ret = -ENOMEM;
 779		goto out;
 780	}
 781
 782	memcpy(wl->fw, fw->data, wl->fw_len);
 783	ret = 0;
 784	wl->fw_type = fw_type;
 785out:
 786	release_firmware(fw);
 787
 788	return ret;
 789}
 790
 791void wl12xx_queue_recovery_work(struct wl1271 *wl)
 792{
 793	/* Avoid a recursive recovery */
 794	if (wl->state == WLCORE_STATE_ON) {
 795		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
 796				  &wl->flags));
 797
 798		wl->state = WLCORE_STATE_RESTARTING;
 799		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
 800		ieee80211_queue_work(wl->hw, &wl->recovery_work);
 801	}
 802}
 803
 804size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
 805{
 806	size_t len;
 807
 808	/* Make sure we have enough room */
 809	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
 810
 811	/* Fill the FW log file, consumed by the sysfs fwlog entry */
 812	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
 813	wl->fwlog_size += len;
 814
 815	return len;
 816}
 817
 818static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
 819{
 820	u32 end_of_log = 0;
 821	int error;
 822
 823	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
 824		return;
 825
 826	wl1271_info("Reading FW panic log");
 827
 828	/*
 829	 * Make sure the chip is awake and the logger isn't active.
 830	 * Do not send a stop fwlog command if the fw is hung or if
 831	 * dbgpins are used (due to some fw bug).
 832	 */
 833	error = pm_runtime_resume_and_get(wl->dev);
  834	if (error < 0)
  835		return;
 836	if (!wl->watchdog_recovery &&
 837	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
 838		wl12xx_cmd_stop_fwlog(wl);
 839
 840	/* Traverse the memory blocks linked list */
 841	do {
 842		end_of_log = wlcore_event_fw_logger(wl);
 843		if (end_of_log == 0) {
 844			msleep(100);
 845			end_of_log = wlcore_event_fw_logger(wl);
 846		}
 847	} while (end_of_log != 0);
 848}
 849
 850static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 851				   u8 hlid, struct ieee80211_sta *sta)
 852{
 853	struct wl1271_station *wl_sta;
 854	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
 855
 856	wl_sta = (void *)sta->drv_priv;
 857	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
 858
 859	/*
 860	 * increment the initial seq number on recovery to account for
 861	 * transmitted packets that we haven't yet got in the FW status
 862	 */
 863	if (wlvif->encryption_type == KEY_GEM)
 864		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
 865
 866	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
 867		wl_sta->total_freed_pkts += sqn_recovery_padding;
 868}
 869
 870static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
 871					struct wl12xx_vif *wlvif,
 872					u8 hlid, const u8 *addr)
 873{
 874	struct ieee80211_sta *sta;
 875	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 876
 877	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
 878		    is_zero_ether_addr(addr)))
 879		return;
 880
 881	rcu_read_lock();
 882	sta = ieee80211_find_sta(vif, addr);
 883	if (sta)
 884		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
 885	rcu_read_unlock();
 886}
 887
 888static void wlcore_print_recovery(struct wl1271 *wl)
 889{
 890	u32 pc = 0;
 891	u32 hint_sts = 0;
 892	int ret;
 893
 894	wl1271_info("Hardware recovery in progress. FW ver: %s",
 895		    wl->chip.fw_ver_str);
 896
 897	/* change partitions momentarily so we can read the FW pc */
 898	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
 899	if (ret < 0)
 900		return;
 901
 902	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
 903	if (ret < 0)
 904		return;
 905
 906	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
 907	if (ret < 0)
 908		return;
 909
 910	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
 911				pc, hint_sts, ++wl->recovery_count);
 912
 913	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
 914}
 915
 916
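/*
 * Hardware recovery worker: dumps the FW panic log and program counter for
 * unintended recoveries, removes all interfaces (preserving per-station
 * sequence numbers), stops the chip and asks mac80211 to restart the HW.
 */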
 917static void wl1271_recovery_work(struct work_struct *work)
 918{
 919	struct wl1271 *wl =
 920		container_of(work, struct wl1271, recovery_work);
 921	struct wl12xx_vif *wlvif;
 922	struct ieee80211_vif *vif;
 923	int error;
 924
 925	mutex_lock(&wl->mutex);
 926
 927	if (wl->state == WLCORE_STATE_OFF || wl->plt)
 928		goto out_unlock;
 929
 930	error = pm_runtime_resume_and_get(wl->dev);
 931	if (error < 0)
  932		wl1271_warning("Enable for recovery failed");
 933	wlcore_disable_interrupts_nosync(wl);
 934
 935	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
 936		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
 937			wl12xx_read_fwlog_panic(wl);
 938		wlcore_print_recovery(wl);
 939	}
 940
 941	BUG_ON(wl->conf.recovery.bug_on_recovery &&
 942	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
 943
 944	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
 945
 946	if (wl->conf.recovery.no_recovery) {
 947		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
 948		goto out_unlock;
 949	}
 950
 951	/* Prevent spurious TX during FW restart */
 952	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
 953
 954	/* reboot the chipset */
 955	while (!list_empty(&wl->wlvif_list)) {
 956		wlvif = list_first_entry(&wl->wlvif_list,
 957				       struct wl12xx_vif, list);
 958		vif = wl12xx_wlvif_to_vif(wlvif);
 959
 960		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
 961		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
 962			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
 963						    vif->bss_conf.bssid);
 964		}
 965
 966		__wl1271_op_remove_interface(wl, vif, false);
 967	}
 968
 969	wlcore_op_stop_locked(wl);
 970	pm_runtime_mark_last_busy(wl->dev);
 971	pm_runtime_put_autosuspend(wl->dev);
 972
 973	ieee80211_restart_hw(wl->hw);
 974
 975	/*
 976	 * It's safe to enable TX now - the queues are stopped after a request
 977	 * to restart the HW.
 978	 */
 979	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
 980
 981out_unlock:
 982	wl->watchdog_recovery = false;
 983	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
 984	mutex_unlock(&wl->mutex);
 985}
 986
 987static int wlcore_fw_wakeup(struct wl1271 *wl)
 988{
 989	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
 990}
 991
 992static int wl1271_setup(struct wl1271 *wl)
 993{
 994	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
 995	if (!wl->raw_fw_status)
 996		goto err;
 997
 998	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
 999	if (!wl->fw_status)
1000		goto err;
1001
1002	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1003	if (!wl->tx_res_if)
1004		goto err;
1005
1006	return 0;
1007err:
1008	kfree(wl->fw_status);
1009	kfree(wl->raw_fw_status);
1010	return -ENOMEM;
1011}
1012
1013static int wl12xx_set_power_on(struct wl1271 *wl)
1014{
1015	int ret;
1016
1017	msleep(WL1271_PRE_POWER_ON_SLEEP);
1018	ret = wl1271_power_on(wl);
1019	if (ret < 0)
1020		goto out;
1021	msleep(WL1271_POWER_ON_SLEEP);
1022	wl1271_io_reset(wl);
1023	wl1271_io_init(wl);
1024
1025	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1026	if (ret < 0)
1027		goto fail;
1028
1029	/* ELP module wake up */
1030	ret = wlcore_fw_wakeup(wl);
1031	if (ret < 0)
1032		goto fail;
1033
1034out:
1035	return ret;
1036
1037fail:
1038	wl1271_power_off(wl);
1039	return ret;
1040}
1041
1042static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1043{
1044	int ret = 0;
1045
1046	ret = wl12xx_set_power_on(wl);
1047	if (ret < 0)
1048		goto out;
1049
1050	/*
1051	 * For wl127x based devices we could use the default block
1052	 * size (512 bytes), but due to a bug in the sdio driver, we
1053	 * need to set it explicitly after the chip is powered on.  To
1054	 * simplify the code and since the performance impact is
1055	 * negligible, we use the same block size for all different
1056	 * chip types.
1057	 *
1058	 * Check if the bus supports blocksize alignment and, if it
1059	 * doesn't, make sure we don't have the quirk.
1060	 */
1061	if (!wl1271_set_block_size(wl))
1062		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1063
1064	/* TODO: make sure the lower driver has set things up correctly */
1065
1066	ret = wl1271_setup(wl);
1067	if (ret < 0)
1068		goto out;
1069
1070	ret = wl12xx_fetch_firmware(wl, plt);
1071	if (ret < 0) {
1072		kfree(wl->fw_status);
1073		kfree(wl->raw_fw_status);
1074		kfree(wl->tx_res_if);
1075	}
1076
1077out:
1078	return ret;
1079}
1080
1081int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1082{
1083	int retries = WL1271_BOOT_RETRIES;
1084	struct wiphy *wiphy = wl->hw->wiphy;
1085
1086	static const char* const PLT_MODE[] = {
1087		"PLT_OFF",
1088		"PLT_ON",
1089		"PLT_FEM_DETECT",
1090		"PLT_CHIP_AWAKE"
1091	};
1092
1093	int ret;
1094
1095	mutex_lock(&wl->mutex);
1096
1097	wl1271_notice("power up");
1098
1099	if (wl->state != WLCORE_STATE_OFF) {
1100		wl1271_error("cannot go into PLT state because not "
1101			     "in off state: %d", wl->state);
1102		ret = -EBUSY;
1103		goto out;
1104	}
1105
1106	/* Indicate to lower levels that we are now in PLT mode */
1107	wl->plt = true;
1108	wl->plt_mode = plt_mode;
1109
1110	while (retries) {
1111		retries--;
1112		ret = wl12xx_chip_wakeup(wl, true);
1113		if (ret < 0)
1114			goto power_off;
1115
1116		if (plt_mode != PLT_CHIP_AWAKE) {
1117			ret = wl->ops->plt_init(wl);
1118			if (ret < 0)
1119				goto power_off;
1120		}
1121
1122		wl->state = WLCORE_STATE_ON;
1123		wl1271_notice("firmware booted in PLT mode %s (%s)",
1124			      PLT_MODE[plt_mode],
1125			      wl->chip.fw_ver_str);
1126
1127		/* update hw/fw version info in wiphy struct */
1128		wiphy->hw_version = wl->chip.id;
1129		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1130			sizeof(wiphy->fw_version));
1131
1132		goto out;
1133
1134power_off:
1135		wl1271_power_off(wl);
1136	}
1137
1138	wl->plt = false;
1139	wl->plt_mode = PLT_OFF;
1140
1141	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1142		     WL1271_BOOT_RETRIES);
1143out:
1144	mutex_unlock(&wl->mutex);
1145
1146	return ret;
1147}
1148
1149int wl1271_plt_stop(struct wl1271 *wl)
1150{
1151	int ret = 0;
1152
1153	wl1271_notice("power down");
1154
1155	/*
1156	 * Interrupts must be disabled before setting the state to OFF.
1157	 * Otherwise, the interrupt handler might be called and exit without
1158	 * reading the interrupt status.
1159	 */
1160	wlcore_disable_interrupts(wl);
1161	mutex_lock(&wl->mutex);
1162	if (!wl->plt) {
1163		mutex_unlock(&wl->mutex);
1164
1165		/*
1166		 * This will not necessarily enable interrupts as interrupts
1167		 * may have been disabled when op_stop was called. It will,
1168		 * however, balance the above call to disable_interrupts().
1169		 */
1170		wlcore_enable_interrupts(wl);
1171
1172		wl1271_error("cannot power down because not in PLT "
1173			     "state: %d", wl->state);
1174		ret = -EBUSY;
1175		goto out;
1176	}
1177
1178	mutex_unlock(&wl->mutex);
1179
1180	wl1271_flush_deferred_work(wl);
1181	cancel_work_sync(&wl->netstack_work);
1182	cancel_work_sync(&wl->recovery_work);
1183	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1184
1185	mutex_lock(&wl->mutex);
1186	wl1271_power_off(wl);
1187	wl->flags = 0;
1188	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1189	wl->state = WLCORE_STATE_OFF;
1190	wl->plt = false;
1191	wl->plt_mode = PLT_OFF;
1192	wl->rx_counter = 0;
1193	mutex_unlock(&wl->mutex);
1194
1195out:
1196	return ret;
1197}
1198
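/*
 * mac80211 TX entry point: maps the skb to a link (hlid) and AC queue, drops
 * it if the link is invalid or the queue is hard-stopped, applies the per-vif
 * high-watermark flow control and kicks the TX work when the FW can accept
 * more frames.
 */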
1199static void wl1271_op_tx(struct ieee80211_hw *hw,
1200			 struct ieee80211_tx_control *control,
1201			 struct sk_buff *skb)
1202{
1203	struct wl1271 *wl = hw->priv;
1204	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1205	struct ieee80211_vif *vif = info->control.vif;
1206	struct wl12xx_vif *wlvif = NULL;
1207	unsigned long flags;
1208	int q, mapping;
1209	u8 hlid;
1210
1211	if (!vif) {
1212		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1213		ieee80211_free_txskb(hw, skb);
1214		return;
1215	}
1216
1217	wlvif = wl12xx_vif_to_data(vif);
1218	mapping = skb_get_queue_mapping(skb);
1219	q = wl1271_tx_get_queue(mapping);
1220
1221	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1222
1223	spin_lock_irqsave(&wl->wl_lock, flags);
1224
1225	/*
1226	 * drop the packet if the link is invalid or the queue is stopped
1227	 * for any reason but watermark. Watermark is a "soft"-stop so we
1228	 * allow these packets through.
1229	 */
1230	if (hlid == WL12XX_INVALID_LINK_ID ||
1231	    (!test_bit(hlid, wlvif->links_map)) ||
1232	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1233	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1234			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1235		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1236		ieee80211_free_txskb(hw, skb);
1237		goto out;
1238	}
1239
1240	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1241		     hlid, q, skb->len);
1242	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1243
1244	wl->tx_queue_count[q]++;
1245	wlvif->tx_queue_count[q]++;
1246
1247	/*
1248	 * The workqueue is slow to process the tx_queue and we need to stop
1249	 * the queue here, otherwise the queue will get too long.
1250	 */
1251	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1252	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1253					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1254		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1255		wlcore_stop_queue_locked(wl, wlvif, q,
1256					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1257	}
1258
1259	/*
1260	 * The chip specific setup must run before the first TX packet -
1261	 * before that, the tx_work will not be initialized!
1262	 */
1263
1264	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1265	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1266		ieee80211_queue_work(wl->hw, &wl->tx_work);
1267
1268out:
1269	spin_unlock_irqrestore(&wl->wl_lock, flags);
1270}
1271
1272int wl1271_tx_dummy_packet(struct wl1271 *wl)
1273{
1274	unsigned long flags;
1275	int q;
1276
1277	/* no need to queue a new dummy packet if one is already pending */
1278	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1279		return 0;
1280
1281	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1282
1283	spin_lock_irqsave(&wl->wl_lock, flags);
1284	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1285	wl->tx_queue_count[q]++;
1286	spin_unlock_irqrestore(&wl->wl_lock, flags);
1287
1288	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1289	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1290		return wlcore_tx_work_locked(wl);
1291
1292	/*
1293	 * If the FW TX is busy, TX work will be scheduled by the threaded
1294	 * interrupt handler function
1295	 */
1296	return 0;
1297}
1298
1299/*
1300 * The size of the dummy packet should be at least 1400 bytes. However, in
1301 * order to minimize the number of bus transactions, aligning it to 512 bytes
1302 * boundaries could be beneficial, performance wise
1303 */
1304#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1305
1306static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1307{
1308	struct sk_buff *skb;
1309	struct ieee80211_hdr_3addr *hdr;
1310	unsigned int dummy_packet_size;
1311
1312	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1313			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1314
1315	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1316	if (!skb) {
1317		wl1271_warning("Failed to allocate a dummy packet skb");
1318		return NULL;
1319	}
1320
1321	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1322
1323	hdr = skb_put_zero(skb, sizeof(*hdr));
1324	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1325					 IEEE80211_STYPE_NULLFUNC |
1326					 IEEE80211_FCTL_TODS);
1327
1328	skb_put_zero(skb, dummy_packet_size);
1329
1330	/* Dummy packets require the TID to be management */
1331	skb->priority = WL1271_TID_MGMT;
1332
1333	/* Initialize all fields that might be used */
1334	skb_set_queue_mapping(skb, 0);
1335	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1336
1337	return skb;
1338}
1339
1340
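/*
 * Verify that a WoWLAN pattern fits within the FW RX-filter limits on the
 * number of fields and on the total size of the fields buffer.
 */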
1341static int
1342wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1343{
1344	int num_fields = 0, in_field = 0, fields_size = 0;
1345	int i, pattern_len = 0;
1346
1347	if (!p->mask) {
1348		wl1271_warning("No mask in WoWLAN pattern");
1349		return -EINVAL;
1350	}
1351
1352	/*
1353	 * The pattern is broken up into segments of bytes at different offsets
1354	 * that need to be checked by the FW filter. Each segment is called
1355	 * a field in the FW API. We verify that the total number of fields
1356	 * required for this pattern won't exceed FW limits (8)
1357	 * and that the total fields buffer won't exceed the FW limit.
1358	 * Note that if there's a pattern which crosses Ethernet/IP header
1359	 * boundary a new field is required.
1360	 */
1361	for (i = 0; i < p->pattern_len; i++) {
1362		if (test_bit(i, (unsigned long *)p->mask)) {
1363			if (!in_field) {
1364				in_field = 1;
1365				pattern_len = 1;
1366			} else {
1367				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1368					num_fields++;
1369					fields_size += pattern_len +
1370						RX_FILTER_FIELD_OVERHEAD;
1371					pattern_len = 1;
1372				} else
1373					pattern_len++;
1374			}
1375		} else {
1376			if (in_field) {
1377				in_field = 0;
1378				fields_size += pattern_len +
1379					RX_FILTER_FIELD_OVERHEAD;
1380				num_fields++;
1381			}
1382		}
1383	}
1384
1385	if (in_field) {
1386		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1387		num_fields++;
1388	}
1389
1390	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1391		wl1271_warning("RX Filter too complex. Too many segments");
1392		return -EINVAL;
1393	}
1394
1395	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1396		wl1271_warning("RX filter pattern is too big");
1397		return -E2BIG;
1398	}
1399
1400	return 0;
1401}
1402
1403struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1404{
1405	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1406}
1407
1408void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1409{
1410	int i;
1411
1412	if (filter == NULL)
1413		return;
1414
1415	for (i = 0; i < filter->num_fields; i++)
1416		kfree(filter->fields[i].pattern);
1417
1418	kfree(filter);
1419}
1420
1421int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1422				 u16 offset, u8 flags,
1423				 const u8 *pattern, u8 len)
1424{
1425	struct wl12xx_rx_filter_field *field;
1426
1427	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1428		wl1271_warning("Max fields per RX filter. can't alloc another");
1429		return -EINVAL;
1430	}
1431
1432	field = &filter->fields[filter->num_fields];
1433
1434	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1435	if (!field->pattern) {
1436		wl1271_warning("Failed to allocate RX filter pattern");
1437		return -ENOMEM;
1438	}
1439
1440	filter->num_fields++;
1441
1442	field->offset = cpu_to_le16(offset);
1443	field->flags = flags;
1444	field->len = len;
1445
1446	return 0;
1447}
1448
1449int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1450{
1451	int i, fields_size = 0;
1452
1453	for (i = 0; i < filter->num_fields; i++)
1454		fields_size += filter->fields[i].len +
1455			sizeof(struct wl12xx_rx_filter_field) -
1456			sizeof(u8 *);
1457
1458	return fields_size;
1459}
1460
1461void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1462				    u8 *buf)
1463{
1464	int i;
1465	struct wl12xx_rx_filter_field *field;
1466
1467	for (i = 0; i < filter->num_fields; i++) {
1468		field = (struct wl12xx_rx_filter_field *)buf;
1469
1470		field->offset = filter->fields[i].offset;
1471		field->flags = filter->fields[i].flags;
1472		field->len = filter->fields[i].len;
1473
1474		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1475		buf += sizeof(struct wl12xx_rx_filter_field) -
1476			sizeof(u8 *) + field->len;
1477	}
1478}
1479
1480/*
1481 * Allocates an RX filter returned through f
1482 * which needs to be freed using rx_filter_free()
1483 */
1484static int
1485wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1486					   struct wl12xx_rx_filter **f)
1487{
1488	int i, j, ret = 0;
1489	struct wl12xx_rx_filter *filter;
1490	u16 offset;
1491	u8 flags, len;
1492
1493	filter = wl1271_rx_filter_alloc();
1494	if (!filter) {
1495		wl1271_warning("Failed to alloc rx filter");
1496		ret = -ENOMEM;
1497		goto err;
1498	}
1499
1500	i = 0;
1501	while (i < p->pattern_len) {
1502		if (!test_bit(i, (unsigned long *)p->mask)) {
1503			i++;
1504			continue;
1505		}
1506
1507		for (j = i; j < p->pattern_len; j++) {
1508			if (!test_bit(j, (unsigned long *)p->mask))
1509				break;
1510
1511			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1512			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1513				break;
1514		}
1515
1516		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1517			offset = i;
1518			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1519		} else {
1520			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1521			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1522		}
1523
1524		len = j - i;
1525
1526		ret = wl1271_rx_filter_alloc_field(filter,
1527						   offset,
1528						   flags,
1529						   &p->pattern[i], len);
1530		if (ret)
1531			goto err;
1532
1533		i = j;
1534	}
1535
1536	filter->action = FILTER_SIGNAL;
1537
1538	*f = filter;
1539	return 0;
1540
1541err:
1542	wl1271_rx_filter_free(filter);
1543	*f = NULL;
1544
1545	return ret;
1546}
1547
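/*
 * Program the FW RX filters for WoWLAN: without usable patterns all frames
 * are signalled to the host; otherwise each pattern is validated, converted
 * to an RX filter and enabled, and non-matching frames are dropped.
 */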
1548static int wl1271_configure_wowlan(struct wl1271 *wl,
1549				   struct cfg80211_wowlan *wow)
1550{
1551	int i, ret;
1552
1553	if (!wow || wow->any || !wow->n_patterns) {
1554		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1555							  FILTER_SIGNAL);
1556		if (ret)
1557			goto out;
1558
1559		ret = wl1271_rx_filter_clear_all(wl);
1560		if (ret)
1561			goto out;
1562
1563		return 0;
1564	}
1565
1566	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1567		return -EINVAL;
1568
1569	/* Validate all incoming patterns before clearing current FW state */
1570	for (i = 0; i < wow->n_patterns; i++) {
1571		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1572		if (ret) {
1573			wl1271_warning("Bad wowlan pattern %d", i);
1574			return ret;
1575		}
1576	}
1577
1578	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1579	if (ret)
1580		goto out;
1581
1582	ret = wl1271_rx_filter_clear_all(wl);
1583	if (ret)
1584		goto out;
1585
1586	/* Translate WoWLAN patterns into filters */
1587	for (i = 0; i < wow->n_patterns; i++) {
1588		struct cfg80211_pkt_pattern *p;
1589		struct wl12xx_rx_filter *filter = NULL;
1590
1591		p = &wow->patterns[i];
1592
1593		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1594		if (ret) {
1595			wl1271_warning("Failed to create an RX filter from "
1596				       "wowlan pattern %d", i);
1597			goto out;
1598		}
1599
1600		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1601
1602		wl1271_rx_filter_free(filter);
1603		if (ret)
1604			goto out;
1605	}
1606
1607	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1608
1609out:
1610	return ret;
1611}
1612
1613static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1614					struct wl12xx_vif *wlvif,
1615					struct cfg80211_wowlan *wow)
1616{
1617	int ret = 0;
1618
1619	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1620		goto out;
1621
1622	ret = wl1271_configure_wowlan(wl, wow);
1623	if (ret < 0)
1624		goto out;
1625
1626	if ((wl->conf.conn.suspend_wake_up_event ==
1627	     wl->conf.conn.wake_up_event) &&
1628	    (wl->conf.conn.suspend_listen_interval ==
1629	     wl->conf.conn.listen_interval))
1630		goto out;
1631
1632	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1633				    wl->conf.conn.suspend_wake_up_event,
1634				    wl->conf.conn.suspend_listen_interval);
1635
1636	if (ret < 0)
1637		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1638out:
1639	return ret;
1640
1641}
1642
1643static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1644					struct wl12xx_vif *wlvif,
1645					struct cfg80211_wowlan *wow)
1646{
1647	int ret = 0;
1648
1649	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1650		goto out;
1651
1652	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1653	if (ret < 0)
1654		goto out;
1655
1656	ret = wl1271_configure_wowlan(wl, wow);
1657	if (ret < 0)
1658		goto out;
1659
1660out:
1661	return ret;
1662
1663}
1664
1665static int wl1271_configure_suspend(struct wl1271 *wl,
1666				    struct wl12xx_vif *wlvif,
1667				    struct cfg80211_wowlan *wow)
1668{
1669	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1670		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1671	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1672		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1673	return 0;
1674}
1675
1676static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1677{
1678	int ret = 0;
1679	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1680	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1681
1682	if ((!is_ap) && (!is_sta))
1683		return;
1684
1685	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1686	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1687		return;
1688
1689	wl1271_configure_wowlan(wl, NULL);
1690
1691	if (is_sta) {
1692		if ((wl->conf.conn.suspend_wake_up_event ==
1693		     wl->conf.conn.wake_up_event) &&
1694		    (wl->conf.conn.suspend_listen_interval ==
1695		     wl->conf.conn.listen_interval))
1696			return;
1697
1698		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1699				    wl->conf.conn.wake_up_event,
1700				    wl->conf.conn.listen_interval);
1701
1702		if (ret < 0)
1703			wl1271_error("resume: wake up conditions failed: %d",
1704				     ret);
1705
1706	} else if (is_ap) {
1707		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1708	}
1709}
1710
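/*
 * mac80211 suspend handler: flushes TX, configures per-vif wake-up conditions
 * and WoWLAN filters, quiets FW link notifications and finally lets runtime
 * PM force-suspend the device with the SUSPENDED flag set.
 */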
1711static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1712					    struct cfg80211_wowlan *wow)
1713{
1714	struct wl1271 *wl = hw->priv;
1715	struct wl12xx_vif *wlvif;
1716	unsigned long flags;
1717	int ret;
1718
1719	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1720	WARN_ON(!wow);
1721
1722	/* we want to perform the recovery before suspending */
1723	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1724		wl1271_warning("postponing suspend to perform recovery");
1725		return -EBUSY;
1726	}
1727
1728	wl1271_tx_flush(wl);
1729
1730	mutex_lock(&wl->mutex);
1731
1732	ret = pm_runtime_resume_and_get(wl->dev);
1733	if (ret < 0) {
1734		mutex_unlock(&wl->mutex);
1735		return ret;
1736	}
1737
1738	wl->wow_enabled = true;
1739	wl12xx_for_each_wlvif(wl, wlvif) {
1740		if (wlcore_is_p2p_mgmt(wlvif))
1741			continue;
1742
1743		ret = wl1271_configure_suspend(wl, wlvif, wow);
1744		if (ret < 0) {
1745			goto out_sleep;
1746		}
1747	}
1748
1749	/* disable fast link flow control notifications from FW */
1750	ret = wlcore_hw_interrupt_notify(wl, false);
1751	if (ret < 0)
1752		goto out_sleep;
1753
1754	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1755	ret = wlcore_hw_rx_ba_filter(wl,
1756				     !!wl->conf.conn.suspend_rx_ba_activity);
1757	if (ret < 0)
1758		goto out_sleep;
1759
1760out_sleep:
1761	pm_runtime_put_noidle(wl->dev);
1762	mutex_unlock(&wl->mutex);
1763
1764	if (ret < 0) {
1765		wl1271_warning("couldn't prepare device to suspend");
1766		return ret;
1767	}
1768
1769	/* flush any remaining work */
1770	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1771
1772	flush_work(&wl->tx_work);
1773
1774	/*
1775	 * Cancel the watchdog even if the above tx_flush failed. We will detect
1776	 * it on resume anyway.
1777	 */
1778	cancel_delayed_work(&wl->tx_watchdog_work);
1779
1780	/*
1781	 * set suspended flag to avoid triggering a new threaded_irq
1782	 * work.
1783	 */
1784	spin_lock_irqsave(&wl->wl_lock, flags);
1785	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1786	spin_unlock_irqrestore(&wl->wl_lock, flags);
1787
1788	return pm_runtime_force_suspend(wl->dev);
1789}
1790
1791static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1792{
1793	struct wl1271 *wl = hw->priv;
1794	struct wl12xx_vif *wlvif;
1795	unsigned long flags;
1796	bool run_irq_work = false, pending_recovery;
1797	int ret;
1798
1799	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1800		     wl->wow_enabled);
1801	WARN_ON(!wl->wow_enabled);
1802
1803	ret = pm_runtime_force_resume(wl->dev);
1804	if (ret < 0) {
1805		wl1271_error("ELP wakeup failure!");
1806		goto out_sleep;
1807	}
1808
1809	/*
1810	 * re-enable irq_work enqueuing, and call irq_work directly if
1811	 * there is a pending work.
1812	 */
1813	spin_lock_irqsave(&wl->wl_lock, flags);
1814	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1815	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1816		run_irq_work = true;
1817	spin_unlock_irqrestore(&wl->wl_lock, flags);
1818
1819	mutex_lock(&wl->mutex);
1820
1821	/* test the recovery flag before calling any SDIO functions */
1822	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1823				    &wl->flags);
1824
1825	if (run_irq_work) {
1826		wl1271_debug(DEBUG_MAC80211,
1827			     "run postponed irq_work directly");
1828
1829		/* don't talk to the HW if recovery is pending */
1830		if (!pending_recovery) {
1831			ret = wlcore_irq_locked(wl);
1832			if (ret)
1833				wl12xx_queue_recovery_work(wl);
1834		}
1835
1836		wlcore_enable_interrupts(wl);
1837	}
1838
1839	if (pending_recovery) {
1840		wl1271_warning("queuing forgotten recovery on resume");
1841		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1842		goto out_sleep;
1843	}
1844
1845	ret = pm_runtime_resume_and_get(wl->dev);
 1846	if (ret < 0)
 1847		goto out;
1848
1849	wl12xx_for_each_wlvif(wl, wlvif) {
1850		if (wlcore_is_p2p_mgmt(wlvif))
1851			continue;
1852
1853		wl1271_configure_resume(wl, wlvif);
1854	}
1855
1856	ret = wlcore_hw_interrupt_notify(wl, true);
1857	if (ret < 0)
1858		goto out_sleep;
1859
1860	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1861	ret = wlcore_hw_rx_ba_filter(wl, false);
1862	if (ret < 0)
1863		goto out_sleep;
1864
1865out_sleep:
1866	pm_runtime_mark_last_busy(wl->dev);
1867	pm_runtime_put_autosuspend(wl->dev);
1868
1869out:
1870	wl->wow_enabled = false;
1871
1872	/*
1873	 * Set a flag to re-init the watchdog on the first Tx after resume.
1874	 * That way we avoid possible conditions where Tx-complete interrupts
1875	 * fail to arrive and we perform a spurious recovery.
1876	 */
1877	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1878	mutex_unlock(&wl->mutex);
1879
1880	return 0;
1881}
1882
1883static int wl1271_op_start(struct ieee80211_hw *hw)
1884{
1885	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1886
1887	/*
1888	 * We have to delay the booting of the hardware because
1889	 * we need to know the local MAC address before downloading and
1890	 * initializing the firmware. The MAC address cannot be changed
1891	 * after boot, and without the proper MAC address, the firmware
1892	 * will not function properly.
1893	 *
1894	 * The MAC address is first known when the corresponding interface
1895	 * is added. That is where we will initialize the hardware.
1896	 */
1897
1898	return 0;
1899}
1900
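/*
 * Power the chip off and reset all driver state; called with wl->mutex held.
 * Interrupts are disabled (and rebalanced if a recovery was pending), pending
 * work is flushed, and queued TX is handed back to mac80211 via
 * wl12xx_tx_reset().
 */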
1901static void wlcore_op_stop_locked(struct wl1271 *wl)
1902{
1903	int i;
1904
1905	if (wl->state == WLCORE_STATE_OFF) {
1906		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1907					&wl->flags))
1908			wlcore_enable_interrupts(wl);
1909
1910		return;
1911	}
1912
1913	/*
1914	 * this must be before the cancel_work calls below, so that the work
1915	 * functions don't perform further work.
1916	 */
1917	wl->state = WLCORE_STATE_OFF;
1918
1919	/*
1920	 * Use the nosync variant to disable interrupts, so the mutex could be
1921	 * held while doing so without deadlocking.
1922	 */
1923	wlcore_disable_interrupts_nosync(wl);
1924
1925	mutex_unlock(&wl->mutex);
1926
1927	wlcore_synchronize_interrupts(wl);
1928	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1929		cancel_work_sync(&wl->recovery_work);
1930	wl1271_flush_deferred_work(wl);
1931	cancel_delayed_work_sync(&wl->scan_complete_work);
1932	cancel_work_sync(&wl->netstack_work);
1933	cancel_work_sync(&wl->tx_work);
1934	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1935
1936	/* let's notify MAC80211 about the remaining pending TX frames */
1937	mutex_lock(&wl->mutex);
1938	wl12xx_tx_reset(wl);
1939
1940	wl1271_power_off(wl);
1941	/*
1942	 * In case a recovery was scheduled, interrupts were disabled to avoid
1943	 * an interrupt storm. Now that the power is down, it is safe to
1944	 * re-enable interrupts to balance the disable depth
1945	 */
1946	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1947		wlcore_enable_interrupts(wl);
1948
1949	wl->band = NL80211_BAND_2GHZ;
1950
1951	wl->rx_counter = 0;
1952	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1953	wl->channel_type = NL80211_CHAN_NO_HT;
1954	wl->tx_blocks_available = 0;
1955	wl->tx_allocated_blocks = 0;
1956	wl->tx_results_count = 0;
1957	wl->tx_packets_count = 0;
1958	wl->time_offset = 0;
1959	wl->ap_fw_ps_map = 0;
1960	wl->ap_ps_map = 0;
1961	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1962	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1963	memset(wl->links_map, 0, sizeof(wl->links_map));
1964	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1965	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1966	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1967	wl->active_sta_count = 0;
1968	wl->active_link_count = 0;
1969
1970	/* The system link is always allocated */
1971	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1972	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1973	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1974
1975	/*
1976	 * this is performed after the cancel_work calls and the associated
1977	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1978	 * get executed before all these vars have been reset.
1979	 */
1980	wl->flags = 0;
1981
1982	wl->tx_blocks_freed = 0;
1983
1984	for (i = 0; i < NUM_TX_QUEUES; i++) {
1985		wl->tx_pkts_freed[i] = 0;
1986		wl->tx_allocated_pkts[i] = 0;
1987	}
1988
1989	wl1271_debugfs_reset(wl);
1990
1991	kfree(wl->raw_fw_status);
1992	wl->raw_fw_status = NULL;
1993	kfree(wl->fw_status);
1994	wl->fw_status = NULL;
1995	kfree(wl->tx_res_if);
1996	wl->tx_res_if = NULL;
1997	kfree(wl->target_mem_map);
1998	wl->target_mem_map = NULL;
1999
2000	/*
2001	 * FW channels must be re-calibrated after recovery; save the
2002	 * current Reg-Domain channel configuration and clear it.
2003	 */
2004	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2005	       sizeof(wl->reg_ch_conf_pending));
2006	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2007}
2008
2009static void wlcore_op_stop(struct ieee80211_hw *hw)
2010{
2011	struct wl1271 *wl = hw->priv;
2012
2013	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2014
2015	mutex_lock(&wl->mutex);
2016
2017	wlcore_op_stop_locked(wl);
2018
2019	mutex_unlock(&wl->mutex);
2020}
2021
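/*
 * Delayed work armed while a channel switch is in progress. If it runs
 * before the switch completes, the switch is considered failed: mac80211 is
 * notified and the firmware is told to stop the channel switch.
 */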
2022static void wlcore_channel_switch_work(struct work_struct *work)
2023{
2024	struct delayed_work *dwork;
2025	struct wl1271 *wl;
2026	struct ieee80211_vif *vif;
2027	struct wl12xx_vif *wlvif;
2028	int ret;
2029
2030	dwork = to_delayed_work(work);
2031	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2032	wl = wlvif->wl;
2033
2034	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2035
2036	mutex_lock(&wl->mutex);
2037
2038	if (unlikely(wl->state != WLCORE_STATE_ON))
2039		goto out;
2040
2041	/* check the channel switch is still ongoing */
2042	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2043		goto out;
2044
2045	vif = wl12xx_wlvif_to_vif(wlvif);
2046	ieee80211_chswitch_done(vif, false);
2047
2048	ret = pm_runtime_resume_and_get(wl->dev);
2049	if (ret < 0)
2050		goto out;
2051
2052	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2053
2054	pm_runtime_mark_last_busy(wl->dev);
2055	pm_runtime_put_autosuspend(wl->dev);
2056out:
2057	mutex_unlock(&wl->mutex);
2058}
2059
2060static void wlcore_connection_loss_work(struct work_struct *work)
2061{
2062	struct delayed_work *dwork;
2063	struct wl1271 *wl;
2064	struct ieee80211_vif *vif;
2065	struct wl12xx_vif *wlvif;
2066
2067	dwork = to_delayed_work(work);
2068	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2069	wl = wlvif->wl;
2070
2071	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2072
2073	mutex_lock(&wl->mutex);
2074
2075	if (unlikely(wl->state != WLCORE_STATE_ON))
2076		goto out;
2077
2078	/* Call mac80211 connection loss */
2079	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2080		goto out;
2081
2082	vif = wl12xx_wlvif_to_vif(wlvif);
2083	ieee80211_connection_loss(vif);
2084out:
2085	mutex_unlock(&wl->mutex);
2086}
2087
2088static void wlcore_pending_auth_complete_work(struct work_struct *work)
2089{
2090	struct delayed_work *dwork;
2091	struct wl1271 *wl;
2092	struct wl12xx_vif *wlvif;
2093	unsigned long time_spare;
2094	int ret;
2095
2096	dwork = to_delayed_work(work);
2097	wlvif = container_of(dwork, struct wl12xx_vif,
2098			     pending_auth_complete_work);
2099	wl = wlvif->wl;
2100
2101	mutex_lock(&wl->mutex);
2102
2103	if (unlikely(wl->state != WLCORE_STATE_ON))
2104		goto out;
2105
2106	/*
2107	 * Make sure a second really passed since the last auth reply. Maybe
2108	 * a second auth reply arrived while we were stuck on the mutex.
2109	 * Check for a little less than the timeout to protect from scheduler
2110	 * irregularities.
2111	 */
2112	time_spare = jiffies +
2113			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2114	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2115		goto out;
2116
2117	ret = pm_runtime_resume_and_get(wl->dev);
2118	if (ret < 0)
2119		goto out;
2120
2121	/* cancel the ROC if active */
2122	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2123
2124	pm_runtime_mark_last_busy(wl->dev);
2125	pm_runtime_put_autosuspend(wl->dev);
2126out:
2127	mutex_unlock(&wl->mutex);
2128}
2129
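/*
 * Rate policies and keep-alive (KLV) templates are limited firmware
 * resources tracked with small bitmaps: the allocate helpers hand out the
 * first free index (or -EBUSY when the map is full), and the free helpers
 * clear the bit and reset the caller's index to the map size, which doubles
 * as the "invalid" value.
 */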
2130static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2131{
2132	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2133					WL12XX_MAX_RATE_POLICIES);
2134	if (policy >= WL12XX_MAX_RATE_POLICIES)
2135		return -EBUSY;
2136
2137	__set_bit(policy, wl->rate_policies_map);
2138	*idx = policy;
2139	return 0;
2140}
2141
2142static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2143{
2144	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2145		return;
2146
2147	__clear_bit(*idx, wl->rate_policies_map);
2148	*idx = WL12XX_MAX_RATE_POLICIES;
2149}
2150
2151static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2152{
2153	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2154					WLCORE_MAX_KLV_TEMPLATES);
2155	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2156		return -EBUSY;
2157
2158	__set_bit(policy, wl->klv_templates_map);
2159	*idx = policy;
2160	return 0;
2161}
2162
2163static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2164{
2165	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2166		return;
2167
2168	__clear_bit(*idx, wl->klv_templates_map);
2169	*idx = WLCORE_MAX_KLV_TEMPLATES;
2170}
2171
2172static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2173{
2174	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2175
2176	switch (wlvif->bss_type) {
2177	case BSS_TYPE_AP_BSS:
2178		if (wlvif->p2p)
2179			return WL1271_ROLE_P2P_GO;
2180		else if (ieee80211_vif_is_mesh(vif))
2181			return WL1271_ROLE_MESH_POINT;
2182		else
2183			return WL1271_ROLE_AP;
2184
2185	case BSS_TYPE_STA_BSS:
2186		if (wlvif->p2p)
2187			return WL1271_ROLE_P2P_CL;
2188		else
2189			return WL1271_ROLE_STA;
2190
2191	case BSS_TYPE_IBSS:
2192		return WL1271_ROLE_IBSS;
2193
2194	default:
2195		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2196	}
2197	return WL12XX_INVALID_ROLE_TYPE;
2198}
2199
2200static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2201{
2202	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2203	int i;
2204
2205	/* clear everything but the persistent data */
2206	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2207
2208	switch (ieee80211_vif_type_p2p(vif)) {
2209	case NL80211_IFTYPE_P2P_CLIENT:
2210		wlvif->p2p = 1;
2211		fallthrough;
2212	case NL80211_IFTYPE_STATION:
2213	case NL80211_IFTYPE_P2P_DEVICE:
2214		wlvif->bss_type = BSS_TYPE_STA_BSS;
2215		break;
2216	case NL80211_IFTYPE_ADHOC:
2217		wlvif->bss_type = BSS_TYPE_IBSS;
2218		break;
2219	case NL80211_IFTYPE_P2P_GO:
2220		wlvif->p2p = 1;
2221		fallthrough;
2222	case NL80211_IFTYPE_AP:
2223	case NL80211_IFTYPE_MESH_POINT:
2224		wlvif->bss_type = BSS_TYPE_AP_BSS;
2225		break;
2226	default:
2227		wlvif->bss_type = MAX_BSS_TYPE;
2228		return -EOPNOTSUPP;
2229	}
2230
2231	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2232	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2233	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2234
2235	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2236	    wlvif->bss_type == BSS_TYPE_IBSS) {
2237		/* init sta/ibss data */
2238		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2239		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2240		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2241		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2242		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2243		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2244		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2245		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2246	} else {
2247		/* init ap data */
2248		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2249		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2250		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2251		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2252		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2253			wl12xx_allocate_rate_policy(wl,
2254						&wlvif->ap.ucast_rate_idx[i]);
2255		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2256		/*
2257		 * TODO: check if basic_rate shouldn't be
2258		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2259		 * instead (the same thing for STA above).
2260		 */
2261		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2262		/* TODO: this seems to be used only for STA, check it */
2263		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2264	}
2265
2266	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2267	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2268	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2269
2270	/*
2271	 * mac80211 configures some values globally, while we treat them
2272	 * per-interface. Thus, on init, we have to copy them from wl.
2273	 */
2274	wlvif->band = wl->band;
2275	wlvif->channel = wl->channel;
2276	wlvif->power_level = wl->power_level;
2277	wlvif->channel_type = wl->channel_type;
2278
2279	INIT_WORK(&wlvif->rx_streaming_enable_work,
2280		  wl1271_rx_streaming_enable_work);
2281	INIT_WORK(&wlvif->rx_streaming_disable_work,
2282		  wl1271_rx_streaming_disable_work);
2283	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2284	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2285			  wlcore_channel_switch_work);
2286	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2287			  wlcore_connection_loss_work);
2288	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2289			  wlcore_pending_auth_complete_work);
2290	INIT_LIST_HEAD(&wlvif->list);
2291
2292	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2293	return 0;
2294}
2295
2296static int wl12xx_init_fw(struct wl1271 *wl)
2297{
2298	int retries = WL1271_BOOT_RETRIES;
2299	bool booted = false;
2300	struct wiphy *wiphy = wl->hw->wiphy;
2301	int ret;
2302
2303	while (retries) {
2304		retries--;
2305		ret = wl12xx_chip_wakeup(wl, false);
2306		if (ret < 0)
2307			goto power_off;
2308
2309		ret = wl->ops->boot(wl);
2310		if (ret < 0)
2311			goto power_off;
2312
2313		ret = wl1271_hw_init(wl);
2314		if (ret < 0)
2315			goto irq_disable;
2316
2317		booted = true;
2318		break;
2319
2320irq_disable:
2321		mutex_unlock(&wl->mutex);
2322		/* Unlocking the mutex in the middle of handling is
2323		   inherently unsafe. In this case we deem it safe to do,
2324		   because we need to let any possibly pending IRQ out of
2325		   the system (and while we are WLCORE_STATE_OFF the IRQ
2326		   work function will not do anything.) Also, any other
2327		   possible concurrent operations will fail due to the
2328		   current state, hence the wl1271 struct should be safe. */
2329		wlcore_disable_interrupts(wl);
2330		wl1271_flush_deferred_work(wl);
2331		cancel_work_sync(&wl->netstack_work);
2332		mutex_lock(&wl->mutex);
2333power_off:
2334		wl1271_power_off(wl);
2335	}
2336
2337	if (!booted) {
2338		wl1271_error("firmware boot failed despite %d retries",
2339			     WL1271_BOOT_RETRIES);
2340		goto out;
2341	}
2342
2343	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2344
2345	/* update hw/fw version info in wiphy struct */
2346	wiphy->hw_version = wl->chip.id;
2347	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2348		sizeof(wiphy->fw_version));
2349
2350	/*
2351	 * Now we know if 11a is supported (info from the NVS), so disable
2352	 * 11a channels if not supported
2353	 */
2354	if (!wl->enable_11a)
2355		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2356
2357	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2358		     wl->enable_11a ? "" : "not ");
2359
2360	wl->state = WLCORE_STATE_ON;
2361out:
2362	return ret;
2363}
2364
2365static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2366{
2367	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2368}
2369
2370/*
2371 * Check whether a fw switch (i.e. moving from one loaded
2372 * fw to another) is needed. This function is also responsible
2373 * for updating wl->last_vif_count, so it must be called before
2374 * loading a non-plt fw, so the correct fw (single-role or multi-role)
2375 * will be used.
2376 */
2377static bool wl12xx_need_fw_change(struct wl1271 *wl,
2378				  struct vif_counter_data vif_counter_data,
2379				  bool add)
2380{
2381	enum wl12xx_fw_type current_fw = wl->fw_type;
2382	u8 vif_count = vif_counter_data.counter;
2383
2384	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2385		return false;
2386
2387	/* increase the vif count if this is a new vif */
2388	if (add && !vif_counter_data.cur_vif_running)
2389		vif_count++;
2390
2391	wl->last_vif_count = vif_count;
2392
2393	/* no need for fw change if the device is OFF */
2394	if (wl->state == WLCORE_STATE_OFF)
2395		return false;
2396
2397	/* no need for fw change if a single fw is used */
2398	if (!wl->mr_fw_name)
2399		return false;
2400
2401	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2402		return true;
2403	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2404		return true;
2405
2406	return false;
2407}
2408
2409/*
2410 * Enter "forced psm". Make sure the sta is in psm against the ap,
2411 * to make the fw switch a bit more disconnection-persistent.
2412 */
2413static void wl12xx_force_active_psm(struct wl1271 *wl)
2414{
2415	struct wl12xx_vif *wlvif;
2416
2417	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2418		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2419	}
2420}
2421
2422struct wlcore_hw_queue_iter_data {
2423	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2424	/* current vif */
2425	struct ieee80211_vif *vif;
2426	/* is the current vif among those iterated */
2427	bool cur_running;
2428};
2429
2430static void wlcore_hw_queue_iter(void *data, u8 *mac,
2431				 struct ieee80211_vif *vif)
2432{
2433	struct wlcore_hw_queue_iter_data *iter_data = data;
2434
2435	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2436	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2437		return;
2438
2439	if (iter_data->cur_running || vif == iter_data->vif) {
2440		iter_data->cur_running = true;
2441		return;
2442	}
2443
2444	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2445}
2446
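/*
 * Each interface gets a contiguous block of NUM_TX_QUEUES mac80211 hw
 * queues. The iterator above marks the blocks already taken by running
 * interfaces; the first free block index, multiplied by NUM_TX_QUEUES,
 * becomes this vif's hw_queue_base. Queue numbers past
 * NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES are reserved for the
 * per-interface cab (content-after-beacon) queues used in AP mode.
 */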
2447static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2448					 struct wl12xx_vif *wlvif)
2449{
2450	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2451	struct wlcore_hw_queue_iter_data iter_data = {};
2452	int i, q_base;
2453
2454	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2455		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2456		return 0;
2457	}
2458
2459	iter_data.vif = vif;
2460
2461	/* mark all bits taken by active interfaces */
2462	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2463					IEEE80211_IFACE_ITER_RESUME_ALL,
2464					wlcore_hw_queue_iter, &iter_data);
2465
2466	/* the current vif is already running in mac80211 (resume/recovery) */
2467	if (iter_data.cur_running) {
2468		wlvif->hw_queue_base = vif->hw_queue[0];
2469		wl1271_debug(DEBUG_MAC80211,
2470			     "using pre-allocated hw queue base %d",
2471			     wlvif->hw_queue_base);
2472
2473		/* the interface type might have changed */
2474		goto adjust_cab_queue;
2475	}
2476
2477	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2478				     WLCORE_NUM_MAC_ADDRESSES);
2479	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2480		return -EBUSY;
2481
2482	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2483	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2484		     wlvif->hw_queue_base);
2485
2486	for (i = 0; i < NUM_TX_QUEUES; i++) {
2487		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2488		/* register hw queues in mac80211 */
2489		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2490	}
2491
2492adjust_cab_queue:
2493	/* the last places are reserved for cab queues per interface */
2494	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2495		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2496				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2497	else
2498		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2499
2500	return 0;
2501}
2502
2503static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2504				   struct ieee80211_vif *vif)
2505{
2506	struct wl1271 *wl = hw->priv;
2507	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2508	struct vif_counter_data vif_count;
2509	int ret = 0;
2510	u8 role_type;
2511
2512	if (wl->plt) {
2513		wl1271_error("Adding Interface not allowed while in PLT mode");
2514		return -EBUSY;
2515	}
2516
2517	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2518			     IEEE80211_VIF_SUPPORTS_UAPSD |
2519			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2520
2521	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2522		     ieee80211_vif_type_p2p(vif), vif->addr);
2523
2524	wl12xx_get_vif_count(hw, vif, &vif_count);
2525
2526	mutex_lock(&wl->mutex);
2527
2528	/*
2529	 * in some rare corner-case HW recovery scenarios it's possible to
2530	 * get here before __wl1271_op_remove_interface has completed, so
2531	 * opt out if that is the case.
2532	 */
2533	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2534	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2535		ret = -EBUSY;
2536		goto out;
2537	}
2538
2539
2540	ret = wl12xx_init_vif_data(wl, vif);
2541	if (ret < 0)
2542		goto out;
2543
2544	wlvif->wl = wl;
2545	role_type = wl12xx_get_role_type(wl, wlvif);
2546	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2547		ret = -EINVAL;
2548		goto out;
2549	}
2550
2551	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2552	if (ret < 0)
2553		goto out;
2554
2555	/*
2556	 * TODO: once the nvs issue is solved, move this block
2557	 * to start(), and make sure the driver is ON here.
2558	 */
2559	if (wl->state == WLCORE_STATE_OFF) {
2560		/*
2561		 * we still need this in order to configure the fw
2562		 * while uploading the nvs
2563		 */
2564		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2565
2566		ret = wl12xx_init_fw(wl);
2567		if (ret < 0)
2568			goto out;
2569	}
2570
2571	/*
2572	 * Call runtime PM only after possible wl12xx_init_fw() above
2573	 * is done. Otherwise we do not have interrupts enabled.
2574	 */
2575	ret = pm_runtime_resume_and_get(wl->dev);
2576	if (ret < 0)
2577		goto out_unlock;
2578
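	/*
	 * Switching between the single-role and multi-role firmware requires
	 * a full restart of the chip: force connected stations into PSM, mark
	 * the recovery as intended, and run the recovery work directly (the
	 * mutex is released before invoking it synchronously).
	 */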
2579	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2580		wl12xx_force_active_psm(wl);
2581		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2582		mutex_unlock(&wl->mutex);
2583		wl1271_recovery_work(&wl->recovery_work);
2584		return 0;
2585	}
2586
2587	if (!wlcore_is_p2p_mgmt(wlvif)) {
2588		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2589					     role_type, &wlvif->role_id);
2590		if (ret < 0)
2591			goto out;
2592
2593		ret = wl1271_init_vif_specific(wl, vif);
2594		if (ret < 0)
2595			goto out;
2596
2597	} else {
2598		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2599					     &wlvif->dev_role_id);
2600		if (ret < 0)
2601			goto out;
2602
2603		/* needed mainly for configuring rate policies */
2604		ret = wl1271_sta_hw_init(wl, wlvif);
2605		if (ret < 0)
2606			goto out;
2607	}
2608
2609	list_add(&wlvif->list, &wl->wlvif_list);
2610	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2611
2612	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2613		wl->ap_count++;
2614	else
2615		wl->sta_count++;
2616out:
2617	pm_runtime_mark_last_busy(wl->dev);
2618	pm_runtime_put_autosuspend(wl->dev);
2619out_unlock:
2620	mutex_unlock(&wl->mutex);
2621
2622	return ret;
2623}
2624
2625static void __wl1271_op_remove_interface(struct wl1271 *wl,
2626					 struct ieee80211_vif *vif,
2627					 bool reset_tx_queues)
2628{
2629	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2630	int i, ret;
2631	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2632
2633	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2634
2635	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2636		return;
2637
2638	/* because of hardware recovery, we may get here twice */
2639	if (wl->state == WLCORE_STATE_OFF)
2640		return;
2641
2642	wl1271_info("down");
2643
2644	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2645	    wl->scan_wlvif == wlvif) {
2646		struct cfg80211_scan_info info = {
2647			.aborted = true,
2648		};
2649
2650		/*
2651		 * Rearm the tx watchdog just before idling scan. This
2652		 * prevents just-finished scans from triggering the watchdog
2653		 */
2654		wl12xx_rearm_tx_watchdog_locked(wl);
2655
2656		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2657		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2658		wl->scan_wlvif = NULL;
2659		wl->scan.req = NULL;
2660		ieee80211_scan_completed(wl->hw, &info);
2661	}
2662
2663	if (wl->sched_vif == wlvif)
2664		wl->sched_vif = NULL;
2665
2666	if (wl->roc_vif == vif) {
2667		wl->roc_vif = NULL;
2668		ieee80211_remain_on_channel_expired(wl->hw);
2669	}
2670
2671	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2672		/* disable active roles */
2673		ret = pm_runtime_resume_and_get(wl->dev);
2674		if (ret < 0)
2675			goto deinit;
2676
2677		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2678		    wlvif->bss_type == BSS_TYPE_IBSS) {
2679			if (wl12xx_dev_role_started(wlvif))
2680				wl12xx_stop_dev(wl, wlvif);
2681		}
2682
2683		if (!wlcore_is_p2p_mgmt(wlvif)) {
2684			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2685			if (ret < 0) {
2686				pm_runtime_put_noidle(wl->dev);
2687				goto deinit;
2688			}
2689		} else {
2690			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2691			if (ret < 0) {
2692				pm_runtime_put_noidle(wl->dev);
2693				goto deinit;
2694			}
2695		}
2696
2697		pm_runtime_mark_last_busy(wl->dev);
2698		pm_runtime_put_autosuspend(wl->dev);
2699	}
2700deinit:
2701	wl12xx_tx_reset_wlvif(wl, wlvif);
2702
2703	/* clear all hlids (except system_hlid) */
2704	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2705
2706	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2707	    wlvif->bss_type == BSS_TYPE_IBSS) {
2708		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2709		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2710		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2711		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2712		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2713	} else {
2714		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2715		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2716		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2717		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2718		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2719			wl12xx_free_rate_policy(wl,
2720						&wlvif->ap.ucast_rate_idx[i]);
2721		wl1271_free_ap_keys(wl, wlvif);
2722	}
2723
2724	dev_kfree_skb(wlvif->probereq);
2725	wlvif->probereq = NULL;
2726	if (wl->last_wlvif == wlvif)
2727		wl->last_wlvif = NULL;
2728	list_del(&wlvif->list);
2729	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2730	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2731	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2732
2733	if (is_ap)
2734		wl->ap_count--;
2735	else
2736		wl->sta_count--;
2737
2738	/*
2739	 * Last AP removed, but stations remain: configure sleep auth according
2740	 * to the STA setting. Don't do this on unintended recovery.
2741	 */
2742	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2743	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2744		goto unlock;
2745
2746	if (wl->ap_count == 0 && is_ap) {
2747		/* mask ap events */
2748		wl->event_mask &= ~wl->ap_event_mask;
2749		wl1271_event_unmask(wl);
2750	}
2751
2752	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2753		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2754		/* Configure for power according to debugfs */
2755		if (sta_auth != WL1271_PSM_ILLEGAL)
2756			wl1271_acx_sleep_auth(wl, sta_auth);
2757		/* Configure for ELP power saving */
2758		else
2759			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2760	}
2761
2762unlock:
2763	mutex_unlock(&wl->mutex);
2764
2765	del_timer_sync(&wlvif->rx_streaming_timer);
2766	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2767	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2768	cancel_work_sync(&wlvif->rc_update_work);
2769	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2770	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2771	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2772
2773	mutex_lock(&wl->mutex);
2774}
2775
2776static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2777				       struct ieee80211_vif *vif)
2778{
2779	struct wl1271 *wl = hw->priv;
2780	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2781	struct wl12xx_vif *iter;
2782	struct vif_counter_data vif_count;
2783
2784	wl12xx_get_vif_count(hw, vif, &vif_count);
2785	mutex_lock(&wl->mutex);
2786
2787	if (wl->state == WLCORE_STATE_OFF ||
2788	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2789		goto out;
2790
2791	/*
2792	 * wl->vif can be null here if someone shuts down the interface
2793	 * just when hardware recovery has been started.
2794	 */
2795	wl12xx_for_each_wlvif(wl, iter) {
2796		if (iter != wlvif)
2797			continue;
2798
2799		__wl1271_op_remove_interface(wl, vif, true);
2800		break;
2801	}
2802	WARN_ON(iter != wlvif);
2803	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2804		wl12xx_force_active_psm(wl);
2805		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2806		wl12xx_queue_recovery_work(wl);
2807	}
2808out:
2809	mutex_unlock(&wl->mutex);
2810}
2811
2812static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2813				      struct ieee80211_vif *vif,
2814				      enum nl80211_iftype new_type, bool p2p)
2815{
2816	struct wl1271 *wl = hw->priv;
2817	int ret;
2818
2819	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2820	wl1271_op_remove_interface(hw, vif);
2821
2822	vif->type = new_type;
2823	vif->p2p = p2p;
2824	ret = wl1271_op_add_interface(hw, vif);
2825
2826	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2827	return ret;
2828}
2829
2830static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2831{
2832	int ret;
2833	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2834
2835	/*
2836	 * One of the side effects of the JOIN command is that it clears
2837	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2838	 * to a WPA/WPA2 access point will therefore kill the data-path.
2839	 * Currently the only valid scenario for JOIN during association
2840	 * is on roaming, in which case we will also be given new keys.
2841	 * Keep the below message for now, unless it starts bothering
2842	 * users who really like to roam a lot :)
2843	 */
2844	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2845		wl1271_info("JOIN while associated.");
2846
2847	/* clear encryption type */
2848	wlvif->encryption_type = KEY_NONE;
2849
2850	if (is_ibss)
2851		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2852	else
2853		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2854
2855	return ret;
2856}
2857
2858static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2859			    int offset)
2860{
2861	u8 ssid_len;
2862	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2863					 skb->len - offset);
2864
2865	if (!ptr) {
2866		wl1271_error("No SSID in IEs!");
2867		return -ENOENT;
2868	}
2869
2870	ssid_len = ptr[1];
2871	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2872		wl1271_error("SSID is too long!");
2873		return -EINVAL;
2874	}
2875
2876	wlvif->ssid_len = ssid_len;
2877	memcpy(wlvif->ssid, ptr+2, ssid_len);
2878	return 0;
2879}
2880
2881static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2882{
2883	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2884	struct sk_buff *skb;
2885	int ieoffset;
2886
2887	/* we currently only support setting the ssid from the ap probe req */
2888	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2889		return -EINVAL;
2890
2891	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2892	if (!skb)
2893		return -EINVAL;
2894
2895	ieoffset = offsetof(struct ieee80211_mgmt,
2896			    u.probe_req.variable);
2897	wl1271_ssid_set(wlvif, skb, ieoffset);
2898	dev_kfree_skb(skb);
2899
2900	return 0;
2901}
2902
2903static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2904			    struct ieee80211_bss_conf *bss_conf,
2905			    u32 sta_rate_set)
2906{
2907	struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
2908						 bss_conf);
2909	int ieoffset;
2910	int ret;
2911
2912	wlvif->aid = vif->cfg.aid;
2913	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2914	wlvif->beacon_int = bss_conf->beacon_int;
2915	wlvif->wmm_enabled = bss_conf->qos;
2916
2917	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2918
2919	/*
2920	 * with wl1271, we don't need to update the
2921	 * beacon_int and dtim_period, because the firmware
2922	 * updates them by itself when the first beacon is
2923	 * received after a join.
2924	 */
2925	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2926	if (ret < 0)
2927		return ret;
2928
2929	/*
2930	 * Get a template for hardware connection maintenance
2931	 */
2932	dev_kfree_skb(wlvif->probereq);
2933	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2934							wlvif,
2935							NULL);
2936	ieoffset = offsetof(struct ieee80211_mgmt,
2937			    u.probe_req.variable);
2938	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2939
2940	/* enable the connection monitoring feature */
2941	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2942	if (ret < 0)
2943		return ret;
2944
2945	/*
2946	 * The join command disables the keep-alive mode, shuts down its process,
2947	 * and also clears the template config, so we need to reset it all after
2948	 * the join. The acx_aid starts the keep-alive process, and the order
2949	 * of the commands below is relevant.
2950	 */
2951	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2952	if (ret < 0)
2953		return ret;
2954
2955	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2956	if (ret < 0)
2957		return ret;
2958
2959	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2960	if (ret < 0)
2961		return ret;
2962
2963	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2964					   wlvif->sta.klv_template_id,
2965					   ACX_KEEP_ALIVE_TPL_VALID);
2966	if (ret < 0)
2967		return ret;
2968
2969	/*
2970	 * The default fw psm configuration is AUTO, while mac80211 default
2971	 * setting is off (ACTIVE), so sync the fw with the correct value.
2972	 */
2973	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2974	if (ret < 0)
2975		return ret;
2976
2977	if (sta_rate_set) {
2978		wlvif->rate_set =
2979			wl1271_tx_enabled_rates_get(wl,
2980						    sta_rate_set,
2981						    wlvif->band);
2982		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2983		if (ret < 0)
2984			return ret;
2985	}
2986
2987	return ret;
2988}
2989
2990static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2991{
2992	int ret;
2993	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2994
2995	/* make sure we are associated (sta) */
2996	if (sta &&
2997	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2998		return false;
2999
3000	/* make sure we are joined (ibss) */
3001	if (!sta &&
3002	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3003		return false;
3004
3005	if (sta) {
3006		/* use defaults when not associated */
3007		wlvif->aid = 0;
3008
3009		/* free probe-request template */
3010		dev_kfree_skb(wlvif->probereq);
3011		wlvif->probereq = NULL;
3012
3013		/* disable connection monitor features */
3014		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3015		if (ret < 0)
3016			return ret;
3017
3018		/* Disable the keep-alive feature */
3019		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3020		if (ret < 0)
3021			return ret;
3022
3023		/* disable beacon filtering */
3024		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3025		if (ret < 0)
3026			return ret;
3027	}
3028
3029	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3030		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3031
3032		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3033		ieee80211_chswitch_done(vif, false);
3034		cancel_delayed_work(&wlvif->channel_switch_work);
3035	}
3036
3037	/* invalidate keep-alive template */
3038	wl1271_acx_keep_alive_config(wl, wlvif,
3039				     wlvif->sta.klv_template_id,
3040				     ACX_KEEP_ALIVE_TPL_INVALID);
3041
3042	return 0;
3043}
3044
3045static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3046{
3047	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3048	wlvif->rate_set = wlvif->basic_rate_set;
3049}
3050
3051static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3052				   bool idle)
3053{
3054	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3055
3056	if (idle == cur_idle)
3057		return;
3058
3059	if (idle) {
3060		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3061	} else {
3062		/* The current firmware only supports sched_scan in idle */
3063		if (wl->sched_vif == wlvif)
3064			wl->ops->sched_scan_stop(wl, wlvif);
3065
3066		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3067	}
3068}
3069
3070static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3071			     struct ieee80211_conf *conf, u32 changed)
3072{
3073	int ret;
3074
3075	if (wlcore_is_p2p_mgmt(wlvif))
3076		return 0;
3077
3078	if (conf->power_level != wlvif->power_level) {
3079		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3080		if (ret < 0)
3081			return ret;
3082
3083		wlvif->power_level = conf->power_level;
3084	}
3085
3086	return 0;
3087}
3088
3089static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3090{
3091	struct wl1271 *wl = hw->priv;
3092	struct wl12xx_vif *wlvif;
3093	struct ieee80211_conf *conf = &hw->conf;
3094	int ret = 0;
3095
3096	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3097		     " changed 0x%x",
3098		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3099		     conf->power_level,
3100		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3101			 changed);
3102
3103	mutex_lock(&wl->mutex);
3104
3105	if (changed & IEEE80211_CONF_CHANGE_POWER)
3106		wl->power_level = conf->power_level;
3107
3108	if (unlikely(wl->state != WLCORE_STATE_ON))
3109		goto out;
3110
3111	ret = pm_runtime_resume_and_get(wl->dev);
3112	if (ret < 0)
3113		goto out;
3114
3115	/* configure each interface */
3116	wl12xx_for_each_wlvif(wl, wlvif) {
3117		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3118		if (ret < 0)
3119			goto out_sleep;
3120	}
3121
3122out_sleep:
3123	pm_runtime_mark_last_busy(wl->dev);
3124	pm_runtime_put_autosuspend(wl->dev);
3125
3126out:
3127	mutex_unlock(&wl->mutex);
3128
3129	return ret;
3130}
3131
3132struct wl1271_filter_params {
3133	bool enabled;
3134	int mc_list_length;
3135	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3136};
3137
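/*
 * The value returned here is handed back by mac80211 as the opaque u64
 * "multicast" argument of configure_filter(), so the allocated filter
 * params are smuggled through as a pointer and freed there. Returning 0
 * (e.g. on allocation failure) simply means there is no multicast data.
 */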
3138static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3139				       struct netdev_hw_addr_list *mc_list)
3140{
3141	struct wl1271_filter_params *fp;
3142	struct netdev_hw_addr *ha;
3143
3144	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3145	if (!fp) {
3146		wl1271_error("Out of memory setting filters.");
3147		return 0;
3148	}
3149
3150	/* update multicast filtering parameters */
3151	fp->mc_list_length = 0;
3152	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3153		fp->enabled = false;
3154	} else {
3155		fp->enabled = true;
3156		netdev_hw_addr_list_for_each(ha, mc_list) {
3157			memcpy(fp->mc_list[fp->mc_list_length],
3158					ha->addr, ETH_ALEN);
3159			fp->mc_list_length++;
3160		}
3161	}
3162
3163	return (u64)(unsigned long)fp;
3164}
3165
3166#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3167				  FIF_FCSFAIL | \
3168				  FIF_BCN_PRBRESP_PROMISC | \
3169				  FIF_CONTROL | \
3170				  FIF_OTHER_BSS)
3171
3172static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3173				       unsigned int changed,
3174				       unsigned int *total, u64 multicast)
3175{
3176	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3177	struct wl1271 *wl = hw->priv;
3178	struct wl12xx_vif *wlvif;
3179
3180	int ret;
3181
3182	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3183		     " total %x", changed, *total);
3184
3185	mutex_lock(&wl->mutex);
3186
3187	*total &= WL1271_SUPPORTED_FILTERS;
3188	changed &= WL1271_SUPPORTED_FILTERS;
3189
3190	if (unlikely(wl->state != WLCORE_STATE_ON))
3191		goto out;
3192
3193	ret = pm_runtime_resume_and_get(wl->dev);
3194	if (ret < 0)
3195		goto out;
3196
3197	wl12xx_for_each_wlvif(wl, wlvif) {
3198		if (wlcore_is_p2p_mgmt(wlvif))
3199			continue;
3200
3201		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3202			if (*total & FIF_ALLMULTI)
3203				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3204								   false,
3205								   NULL, 0);
3206			else if (fp)
3207				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3208							fp->enabled,
3209							fp->mc_list,
3210							fp->mc_list_length);
3211			if (ret < 0)
3212				goto out_sleep;
3213		}
3214
3215		/*
3216		 * If the interface is in AP mode and was created with allmulticast,
3217		 * disable the firmware filters so that all multicast packets are
3218		 * passed. This is mandatory for mDNS-based discovery protocols.
3219		 */
3220		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3221			if (*total & FIF_ALLMULTI) {
3222				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3223							false,
3224							NULL, 0);
3225				if (ret < 0)
3226					goto out_sleep;
3227			}
3228		}
3229	}
3230
3231	/*
3232	 * the fw doesn't provide an api to configure the filters. instead,
3233	 * the filter configuration is based on the active roles / ROC
3234	 * state.
3235	 */
3236
3237out_sleep:
3238	pm_runtime_mark_last_busy(wl->dev);
3239	pm_runtime_put_autosuspend(wl->dev);
3240
3241out:
3242	mutex_unlock(&wl->mutex);
3243	kfree(fp);
3244}
3245
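/*
 * AP keys that are set before the AP role has started (see wl1271_set_key()
 * below) cannot be programmed into the firmware yet, so they are recorded
 * here and pushed to the firmware later by wl1271_ap_init_hwenc().
 */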
3246static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3247				u8 id, u8 key_type, u8 key_size,
3248				const u8 *key, u8 hlid, u32 tx_seq_32,
3249				u16 tx_seq_16, bool is_pairwise)
3250{
3251	struct wl1271_ap_key *ap_key;
3252	int i;
3253
3254	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3255
3256	if (key_size > MAX_KEY_SIZE)
3257		return -EINVAL;
3258
3259	/*
3260	 * Find next free entry in ap_keys. Also check we are not replacing
3261	 * an existing key.
3262	 */
3263	for (i = 0; i < MAX_NUM_KEYS; i++) {
3264		if (wlvif->ap.recorded_keys[i] == NULL)
3265			break;
3266
3267		if (wlvif->ap.recorded_keys[i]->id == id) {
3268			wl1271_warning("trying to record key replacement");
3269			return -EINVAL;
3270		}
3271	}
3272
3273	if (i == MAX_NUM_KEYS)
3274		return -EBUSY;
3275
3276	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3277	if (!ap_key)
3278		return -ENOMEM;
3279
3280	ap_key->id = id;
3281	ap_key->key_type = key_type;
3282	ap_key->key_size = key_size;
3283	memcpy(ap_key->key, key, key_size);
3284	ap_key->hlid = hlid;
3285	ap_key->tx_seq_32 = tx_seq_32;
3286	ap_key->tx_seq_16 = tx_seq_16;
3287	ap_key->is_pairwise = is_pairwise;
3288
3289	wlvif->ap.recorded_keys[i] = ap_key;
3290	return 0;
3291}
3292
3293static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3294{
3295	int i;
3296
3297	for (i = 0; i < MAX_NUM_KEYS; i++) {
3298		kfree(wlvif->ap.recorded_keys[i]);
3299		wlvif->ap.recorded_keys[i] = NULL;
3300	}
3301}
3302
3303static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3304{
3305	int i, ret = 0;
3306	struct wl1271_ap_key *key;
3307	bool wep_key_added = false;
3308
3309	for (i = 0; i < MAX_NUM_KEYS; i++) {
3310		u8 hlid;
3311		if (wlvif->ap.recorded_keys[i] == NULL)
3312			break;
3313
3314		key = wlvif->ap.recorded_keys[i];
3315		hlid = key->hlid;
3316		if (hlid == WL12XX_INVALID_LINK_ID)
3317			hlid = wlvif->ap.bcast_hlid;
3318
3319		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3320					    key->id, key->key_type,
3321					    key->key_size, key->key,
3322					    hlid, key->tx_seq_32,
3323					    key->tx_seq_16, key->is_pairwise);
3324		if (ret < 0)
3325			goto out;
3326
3327		if (key->key_type == KEY_WEP)
3328			wep_key_added = true;
3329	}
3330
3331	if (wep_key_added) {
3332		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3333						     wlvif->ap.bcast_hlid);
3334		if (ret < 0)
3335			goto out;
3336	}
3337
3338out:
3339	wl1271_free_ap_keys(wl, wlvif);
3340	return ret;
3341}
3342
3343static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3344		       u16 action, u8 id, u8 key_type,
3345		       u8 key_size, const u8 *key, u32 tx_seq_32,
3346		       u16 tx_seq_16, struct ieee80211_sta *sta,
3347		       bool is_pairwise)
3348{
3349	int ret;
3350	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3351
3352	if (is_ap) {
3353		struct wl1271_station *wl_sta;
3354		u8 hlid;
3355
3356		if (sta) {
3357			wl_sta = (struct wl1271_station *)sta->drv_priv;
3358			hlid = wl_sta->hlid;
3359		} else {
3360			hlid = wlvif->ap.bcast_hlid;
3361		}
3362
3363		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3364			/*
3365			 * We do not support removing keys after AP shutdown.
3366			 * Pretend we do to make mac80211 happy.
3367			 */
3368			if (action != KEY_ADD_OR_REPLACE)
3369				return 0;
3370
3371			ret = wl1271_record_ap_key(wl, wlvif, id,
3372					     key_type, key_size,
3373					     key, hlid, tx_seq_32,
3374					     tx_seq_16, is_pairwise);
3375		} else {
3376			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3377					     id, key_type, key_size,
3378					     key, hlid, tx_seq_32,
3379					     tx_seq_16, is_pairwise);
3380		}
3381
3382		if (ret < 0)
3383			return ret;
3384	} else {
3385		const u8 *addr;
3386		static const u8 bcast_addr[ETH_ALEN] = {
3387			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3388		};
3389
3390		addr = sta ? sta->addr : bcast_addr;
3391
3392		if (is_zero_ether_addr(addr)) {
3393			/* We don't support TX-only encryption */
3394			return -EOPNOTSUPP;
3395		}
3396
3397		/* The wl1271 does not allow removing unicast keys - they
3398		   will be cleared automatically on the next CMD_JOIN. Ignore the
3399		   request silently, as we don't want mac80211 to emit
3400		   an error message. */
3401		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3402			return 0;
3403
3404		/* don't remove key if hlid was already deleted */
3405		if (action == KEY_REMOVE &&
3406		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3407			return 0;
3408
3409		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3410					     id, key_type, key_size,
3411					     key, addr, tx_seq_32,
3412					     tx_seq_16);
3413		if (ret < 0)
3414			return ret;
3415
3416	}
3417
3418	return 0;
3419}
3420
3421static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3422			     struct ieee80211_vif *vif,
3423			     struct ieee80211_sta *sta,
3424			     struct ieee80211_key_conf *key_conf)
3425{
3426	struct wl1271 *wl = hw->priv;
3427	int ret;
3428	bool might_change_spare =
3429		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3430		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3431
3432	if (might_change_spare) {
3433		/*
3434		 * stop the queues and flush to ensure the next packets are
3435		 * in sync with FW spare block accounting
3436		 */
3437		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3438		wl1271_tx_flush(wl);
3439	}
3440
3441	mutex_lock(&wl->mutex);
3442
3443	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3444		ret = -EAGAIN;
3445		goto out_wake_queues;
3446	}
3447
3448	ret = pm_runtime_resume_and_get(wl->dev);
3449	if (ret < 0)
3450		goto out_wake_queues;
3451
3452	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3453
3454	pm_runtime_mark_last_busy(wl->dev);
3455	pm_runtime_put_autosuspend(wl->dev);
3456
3457out_wake_queues:
3458	if (might_change_spare)
3459		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3460
3461	mutex_unlock(&wl->mutex);
3462
3463	return ret;
3464}
3465
3466int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3467		   struct ieee80211_vif *vif,
3468		   struct ieee80211_sta *sta,
3469		   struct ieee80211_key_conf *key_conf)
3470{
3471	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3472	int ret;
3473	u32 tx_seq_32 = 0;
3474	u16 tx_seq_16 = 0;
3475	u8 key_type;
3476	u8 hlid;
3477	bool is_pairwise;
3478
3479	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3480
3481	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3482	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3483		     key_conf->cipher, key_conf->keyidx,
3484		     key_conf->keylen, key_conf->flags);
3485	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3486
3487	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3488		if (sta) {
3489			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3490			hlid = wl_sta->hlid;
3491		} else {
3492			hlid = wlvif->ap.bcast_hlid;
3493		}
3494	else
3495		hlid = wlvif->sta.hlid;
3496
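	/*
	 * Seed the key's TX security sequence counter from the number of
	 * packets already freed on this link (total_freed_pkts), split into
	 * the 32-bit high and 16-bit low parts the set-key commands take.
	 */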
3497	if (hlid != WL12XX_INVALID_LINK_ID) {
3498		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3499		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3500		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3501	}
3502
3503	switch (key_conf->cipher) {
3504	case WLAN_CIPHER_SUITE_WEP40:
3505	case WLAN_CIPHER_SUITE_WEP104:
3506		key_type = KEY_WEP;
3507
3508		key_conf->hw_key_idx = key_conf->keyidx;
3509		break;
3510	case WLAN_CIPHER_SUITE_TKIP:
3511		key_type = KEY_TKIP;
3512		key_conf->hw_key_idx = key_conf->keyidx;
3513		break;
3514	case WLAN_CIPHER_SUITE_CCMP:
3515		key_type = KEY_AES;
3516		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3517		break;
3518	case WL1271_CIPHER_SUITE_GEM:
3519		key_type = KEY_GEM;
3520		break;
3521	default:
3522		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3523
3524		return -EOPNOTSUPP;
3525	}
3526
3527	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3528
3529	switch (cmd) {
3530	case SET_KEY:
3531		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3532				 key_conf->keyidx, key_type,
3533				 key_conf->keylen, key_conf->key,
3534				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3535		if (ret < 0) {
3536			wl1271_error("Could not add or replace key");
3537			return ret;
3538		}
3539
3540		/*
3541		 * reconfigure the arp response if the unicast (or common)
3542		 * encryption key type was changed
3543		 */
3544		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3545		    (sta || key_type == KEY_WEP) &&
3546		    wlvif->encryption_type != key_type) {
3547			wlvif->encryption_type = key_type;
3548			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3549			if (ret < 0) {
3550				wl1271_warning("build arp rsp failed: %d", ret);
3551				return ret;
3552			}
3553		}
3554		break;
3555
3556	case DISABLE_KEY:
3557		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3558				     key_conf->keyidx, key_type,
3559				     key_conf->keylen, key_conf->key,
3560				     0, 0, sta, is_pairwise);
3561		if (ret < 0) {
3562			wl1271_error("Could not remove key");
3563			return ret;
3564		}
3565		break;
3566
3567	default:
3568		wl1271_error("Unsupported key cmd 0x%x", cmd);
3569		return -EOPNOTSUPP;
3570	}
3571
3572	return ret;
3573}
3574EXPORT_SYMBOL_GPL(wlcore_set_key);
3575
3576static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3577					  struct ieee80211_vif *vif,
3578					  int key_idx)
3579{
3580	struct wl1271 *wl = hw->priv;
3581	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3582	int ret;
3583
3584	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3585		     key_idx);
3586
3587	/* we don't handle unsetting of default key */
3588	if (key_idx == -1)
3589		return;
3590
3591	mutex_lock(&wl->mutex);
3592
3593	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3594		ret = -EAGAIN;
3595		goto out_unlock;
3596	}
3597
3598	ret = pm_runtime_resume_and_get(wl->dev);
3599	if (ret < 0)
3600		goto out_unlock;
3601
3602	wlvif->default_key = key_idx;
3603
3604	/* the default WEP key needs to be configured at least once */
3605	if (wlvif->encryption_type == KEY_WEP) {
3606		ret = wl12xx_cmd_set_default_wep_key(wl,
3607				key_idx,
3608				wlvif->sta.hlid);
3609		if (ret < 0)
3610			goto out_sleep;
3611	}
3612
3613out_sleep:
3614	pm_runtime_mark_last_busy(wl->dev);
3615	pm_runtime_put_autosuspend(wl->dev);
3616
3617out_unlock:
3618	mutex_unlock(&wl->mutex);
3619}
3620
3621void wlcore_regdomain_config(struct wl1271 *wl)
3622{
3623	int ret;
3624
3625	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3626		return;
3627
3628	mutex_lock(&wl->mutex);
3629
3630	if (unlikely(wl->state != WLCORE_STATE_ON))
3631		goto out;
3632
3633	ret = pm_runtime_resume_and_get(wl->dev);
3634	if (ret < 0)
3635		goto out;
3636
3637	ret = wlcore_cmd_regdomain_config_locked(wl);
3638	if (ret < 0) {
3639		wl12xx_queue_recovery_work(wl);
3640		goto out;
3641	}
3642
3643	pm_runtime_mark_last_busy(wl->dev);
3644	pm_runtime_put_autosuspend(wl->dev);
3645out:
3646	mutex_unlock(&wl->mutex);
3647}
3648
3649static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3650			     struct ieee80211_vif *vif,
3651			     struct ieee80211_scan_request *hw_req)
3652{
3653	struct cfg80211_scan_request *req = &hw_req->req;
3654	struct wl1271 *wl = hw->priv;
3655	int ret;
3656	u8 *ssid = NULL;
3657	size_t len = 0;
3658
3659	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3660
3661	if (req->n_ssids) {
3662		ssid = req->ssids[0].ssid;
3663		len = req->ssids[0].ssid_len;
3664	}
3665
3666	mutex_lock(&wl->mutex);
3667
3668	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3669		/*
3670		 * We cannot return -EBUSY here because cfg80211 will expect
3671		 * a call to ieee80211_scan_completed if we do - in this case
3672		 * there won't be any call.
3673		 */
3674		ret = -EAGAIN;
3675		goto out;
3676	}
3677
3678	ret = pm_runtime_resume_and_get(wl->dev);
3679	if (ret < 0)
3680		goto out;
3681
3682	/* fail if there is any role in ROC */
3683	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3684		/* don't allow scanning right now */
3685		ret = -EBUSY;
3686		goto out_sleep;
3687	}
3688
3689	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3690out_sleep:
3691	pm_runtime_mark_last_busy(wl->dev);
3692	pm_runtime_put_autosuspend(wl->dev);
3693out:
3694	mutex_unlock(&wl->mutex);
3695
3696	return ret;
3697}
3698
3699static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3700				     struct ieee80211_vif *vif)
3701{
3702	struct wl1271 *wl = hw->priv;
3703	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3704	struct cfg80211_scan_info info = {
3705		.aborted = true,
3706	};
3707	int ret;
3708
3709	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3710
3711	mutex_lock(&wl->mutex);
3712
3713	if (unlikely(wl->state != WLCORE_STATE_ON))
3714		goto out;
3715
3716	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3717		goto out;
3718
3719	ret = pm_runtime_resume_and_get(wl->dev);
3720	if (ret < 0)
3721		goto out;
3722
3723	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3724		ret = wl->ops->scan_stop(wl, wlvif);
3725		if (ret < 0)
3726			goto out_sleep;
3727	}
3728
3729	/*
3730	 * Rearm the tx watchdog just before idling scan. This
3731	 * prevents just-finished scans from triggering the watchdog
3732	 */
3733	wl12xx_rearm_tx_watchdog_locked(wl);
3734
3735	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3736	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3737	wl->scan_wlvif = NULL;
3738	wl->scan.req = NULL;
3739	ieee80211_scan_completed(wl->hw, &info);
3740
3741out_sleep:
3742	pm_runtime_mark_last_busy(wl->dev);
3743	pm_runtime_put_autosuspend(wl->dev);
3744out:
3745	mutex_unlock(&wl->mutex);
3746
3747	cancel_delayed_work_sync(&wl->scan_complete_work);
3748}
3749
3750static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3751				      struct ieee80211_vif *vif,
3752				      struct cfg80211_sched_scan_request *req,
3753				      struct ieee80211_scan_ies *ies)
3754{
3755	struct wl1271 *wl = hw->priv;
3756	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3757	int ret;
3758
3759	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3760
3761	mutex_lock(&wl->mutex);
3762
3763	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3764		ret = -EAGAIN;
3765		goto out;
3766	}
3767
3768	ret = pm_runtime_resume_and_get(wl->dev);
3769	if (ret < 0)
3770		goto out;
3771
3772	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3773	if (ret < 0)
3774		goto out_sleep;
3775
3776	wl->sched_vif = wlvif;
3777
3778out_sleep:
3779	pm_runtime_mark_last_busy(wl->dev);
3780	pm_runtime_put_autosuspend(wl->dev);
3781out:
3782	mutex_unlock(&wl->mutex);
3783	return ret;
3784}
3785
3786static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3787				     struct ieee80211_vif *vif)
3788{
3789	struct wl1271 *wl = hw->priv;
3790	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3791	int ret;
3792
3793	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3794
3795	mutex_lock(&wl->mutex);
3796
3797	if (unlikely(wl->state != WLCORE_STATE_ON))
3798		goto out;
3799
3800	ret = pm_runtime_resume_and_get(wl->dev);
3801	if (ret < 0)
3802		goto out;
3803
3804	wl->ops->sched_scan_stop(wl, wlvif);
3805
3806	pm_runtime_mark_last_busy(wl->dev);
3807	pm_runtime_put_autosuspend(wl->dev);
3808out:
3809	mutex_unlock(&wl->mutex);
3810
3811	return 0;
3812}
3813
3814static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3815{
3816	struct wl1271 *wl = hw->priv;
3817	int ret = 0;
3818
3819	mutex_lock(&wl->mutex);
3820
3821	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3822		ret = -EAGAIN;
3823		goto out;
3824	}
3825
3826	ret = pm_runtime_resume_and_get(wl->dev);
3827	if (ret < 0)
3828		goto out;
3829
3830	ret = wl1271_acx_frag_threshold(wl, value);
3831	if (ret < 0)
3832		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3833
3834	pm_runtime_mark_last_busy(wl->dev);
3835	pm_runtime_put_autosuspend(wl->dev);
3836
3837out:
3838	mutex_unlock(&wl->mutex);
3839
3840	return ret;
3841}
3842
3843static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3844{
3845	struct wl1271 *wl = hw->priv;
3846	struct wl12xx_vif *wlvif;
3847	int ret = 0;
3848
3849	mutex_lock(&wl->mutex);
3850
3851	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3852		ret = -EAGAIN;
3853		goto out;
3854	}
3855
3856	ret = pm_runtime_resume_and_get(wl->dev);
3857	if (ret < 0)
3858		goto out;
3859
3860	wl12xx_for_each_wlvif(wl, wlvif) {
3861		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3862		if (ret < 0)
3863			wl1271_warning("set rts threshold failed: %d", ret);
3864	}
3865	pm_runtime_mark_last_busy(wl->dev);
3866	pm_runtime_put_autosuspend(wl->dev);
3867
3868out:
3869	mutex_unlock(&wl->mutex);
3870
3871	return ret;
3872}
3873
3874static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3875{
3876	int len;
3877	const u8 *next, *end = skb->data + skb->len;
3878	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3879					skb->len - ieoffset);
3880	if (!ie)
3881		return;
3882	len = ie[1] + 2;
3883	next = ie + len;
3884	memmove(ie, next, end - next);
3885	skb_trim(skb, skb->len - len);
3886}
3887
3888static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3889					    unsigned int oui, u8 oui_type,
3890					    int ieoffset)
3891{
3892	int len;
3893	const u8 *next, *end = skb->data + skb->len;
3894	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3895					       skb->data + ieoffset,
3896					       skb->len - ieoffset);
3897	if (!ie)
3898		return;
3899	len = ie[1] + 2;
3900	next = ie + len;
3901	memmove(ie, next, end - next);
3902	skb_trim(skb, skb->len - len);
3903}
3904
3905static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3906					 struct ieee80211_vif *vif)
3907{
3908	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3909	struct sk_buff *skb;
3910	int ret;
3911
3912	skb = ieee80211_proberesp_get(wl->hw, vif);
3913	if (!skb)
3914		return -EOPNOTSUPP;
3915
3916	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3917				      CMD_TEMPL_AP_PROBE_RESPONSE,
3918				      skb->data,
3919				      skb->len, 0,
3920				      rates);
3921	dev_kfree_skb(skb);
3922
3923	if (ret < 0)
3924		goto out;
3925
3926	wl1271_debug(DEBUG_AP, "probe response updated");
3927	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3928
3929out:
3930	return ret;
3931}
3932
3933static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3934					     struct ieee80211_vif *vif,
3935					     u8 *probe_rsp_data,
3936					     size_t probe_rsp_len,
3937					     u32 rates)
3938{
3939	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3940	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3941	int ssid_ie_offset, ie_offset, templ_len;
3942	const u8 *ptr;
3943
3944	/* no need to change probe response if the SSID is set correctly */
3945	if (wlvif->ssid_len > 0)
3946		return wl1271_cmd_template_set(wl, wlvif->role_id,
3947					       CMD_TEMPL_AP_PROBE_RESPONSE,
3948					       probe_rsp_data,
3949					       probe_rsp_len, 0,
3950					       rates);
3951
3952	if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3953		wl1271_error("probe_rsp template too big");
3954		return -EINVAL;
3955	}
3956
3957	/* start searching from IE offset */
3958	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3959
3960	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3961			       probe_rsp_len - ie_offset);
3962	if (!ptr) {
3963		wl1271_error("No SSID in beacon!");
3964		return -EINVAL;
3965	}
3966
3967	ssid_ie_offset = ptr - probe_rsp_data;
3968	ptr += (ptr[1] + 2);
3969
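	/*
	 * Rebuild the template: copy everything up to the (empty) SSID IE,
	 * insert the real SSID from vif->cfg, then append the remainder of
	 * the original frame that followed the old SSID IE.
	 */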
3970	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3971
3972	/* insert SSID from bss_conf */
3973	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3974	probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
3975	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3976	       vif->cfg.ssid, vif->cfg.ssid_len);
3977	templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
3978
3979	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
3980	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3981	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3982
3983	return wl1271_cmd_template_set(wl, wlvif->role_id,
3984				       CMD_TEMPL_AP_PROBE_RESPONSE,
3985				       probe_rsp_templ,
3986				       templ_len, 0,
3987				       rates);
3988}
3989
3990static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3991				       struct ieee80211_vif *vif,
3992				       struct ieee80211_bss_conf *bss_conf,
3993				       u32 changed)
3994{
3995	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3996	int ret = 0;
3997
3998	if (changed & BSS_CHANGED_ERP_SLOT) {
3999		if (bss_conf->use_short_slot)
4000			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4001		else
4002			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4003		if (ret < 0) {
4004			wl1271_warning("Set slot time failed %d", ret);
4005			goto out;
4006		}
4007	}
4008
4009	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4010		if (bss_conf->use_short_preamble)
4011			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4012		else
4013			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4014	}
4015
4016	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4017		if (bss_conf->use_cts_prot)
4018			ret = wl1271_acx_cts_protect(wl, wlvif,
4019						     CTSPROTECT_ENABLE);
4020		else
4021			ret = wl1271_acx_cts_protect(wl, wlvif,
4022						     CTSPROTECT_DISABLE);
4023		if (ret < 0) {
4024			wl1271_warning("Set ctsprotect failed %d", ret);
4025			goto out;
4026		}
4027	}
4028
4029out:
4030	return ret;
4031}
4032
4033static int wlcore_set_beacon_template(struct wl1271 *wl,
4034				      struct ieee80211_vif *vif,
4035				      bool is_ap)
4036{
4037	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4038	struct ieee80211_hdr *hdr;
4039	u32 min_rate;
4040	int ret;
4041	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4042	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
4043	u16 tmpl_id;
4044
4045	if (!beacon) {
4046		ret = -EINVAL;
4047		goto out;
4048	}
4049
4050	wl1271_debug(DEBUG_MASTER, "beacon updated");
4051
4052	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4053	if (ret < 0) {
4054		dev_kfree_skb(beacon);
4055		goto out;
4056	}
4057	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4058	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4059		CMD_TEMPL_BEACON;
4060	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4061				      beacon->data,
4062				      beacon->len, 0,
4063				      min_rate);
4064	if (ret < 0) {
4065		dev_kfree_skb(beacon);
4066		goto out;
4067	}
4068
4069	wlvif->wmm_enabled =
4070		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4071					WLAN_OUI_TYPE_MICROSOFT_WMM,
4072					beacon->data + ieoffset,
4073					beacon->len - ieoffset);
4074
4075	/*
4076	 * In case a probe-resp template was already set explicitly
4077	 * by userspace, don't derive one from the beacon data.
4078	 */
4079	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4080		goto end_bcn;
4081
4082	/* remove TIM ie from probe response */
4083	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4084
4085	/*
4086	 * remove p2p ie from probe response.
4087	 * the fw responds to probe requests that don't include
4088	 * the p2p ie. probe requests with a p2p ie will be passed up
4089	 * and answered by the supplicant (the spec
4090	 * forbids including the p2p ie when responding to probe
4091	 * requests that didn't include it).
4092	 */
4093	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4094				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4095
4096	hdr = (struct ieee80211_hdr *) beacon->data;
4097	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4098					 IEEE80211_STYPE_PROBE_RESP);
4099	if (is_ap)
4100		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4101							   beacon->data,
4102							   beacon->len,
4103							   min_rate);
4104	else
4105		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4106					      CMD_TEMPL_PROBE_RESPONSE,
4107					      beacon->data,
4108					      beacon->len, 0,
4109					      min_rate);
4110end_bcn:
4111	dev_kfree_skb(beacon);
4112	if (ret < 0)
4113		goto out;
4114
4115out:
4116	return ret;
4117}
4118
4119static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4120					  struct ieee80211_vif *vif,
4121					  struct ieee80211_bss_conf *bss_conf,
4122					  u32 changed)
4123{
4124	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4125	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4126	int ret = 0;
4127
4128	if (changed & BSS_CHANGED_BEACON_INT) {
4129		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4130			bss_conf->beacon_int);
4131
4132		wlvif->beacon_int = bss_conf->beacon_int;
4133	}
4134
4135	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4136		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4137
4138		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4139	}
4140
4141	if (changed & BSS_CHANGED_BEACON) {
4142		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4143		if (ret < 0)
4144			goto out;
4145
4146		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4147				       &wlvif->flags)) {
4148			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4149			if (ret < 0)
4150				goto out;
4151		}
4152	}
4153out:
4154	if (ret != 0)
4155		wl1271_error("beacon info change failed: %d", ret);
4156	return ret;
4157}
4158
4159/* AP mode changes */
4160static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4161				       struct ieee80211_vif *vif,
4162				       struct ieee80211_bss_conf *bss_conf,
4163				       u32 changed)
4164{
4165	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4166	int ret = 0;
4167
4168	if (changed & BSS_CHANGED_BASIC_RATES) {
4169		u32 rates = bss_conf->basic_rates;
4170
4171		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4172								 wlvif->band);
4173		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4174							wlvif->basic_rate_set);
4175
4176		ret = wl1271_init_ap_rates(wl, wlvif);
4177		if (ret < 0) {
4178			wl1271_error("AP rate policy change failed %d", ret);
4179			goto out;
4180		}
4181
4182		ret = wl1271_ap_init_templates(wl, vif);
4183		if (ret < 0)
4184			goto out;
4185
4186		/* No need to set probe resp template for mesh */
4187		if (!ieee80211_vif_is_mesh(vif)) {
4188			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4189							    wlvif->basic_rate,
4190							    vif);
4191			if (ret < 0)
4192				goto out;
4193		}
4194
4195		ret = wlcore_set_beacon_template(wl, vif, true);
4196		if (ret < 0)
4197			goto out;
4198	}
4199
4200	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4201	if (ret < 0)
4202		goto out;
4203
4204	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4205		if (bss_conf->enable_beacon) {
4206			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4207				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4208				if (ret < 0)
4209					goto out;
4210
4211				ret = wl1271_ap_init_hwenc(wl, wlvif);
4212				if (ret < 0)
4213					goto out;
4214
4215				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4216				wl1271_debug(DEBUG_AP, "started AP");
4217			}
4218		} else {
4219			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4220				/*
4221				 * AP might be in ROC in case we have just
4222				 * sent an auth reply; handle it.
4223				 */
4224				if (test_bit(wlvif->role_id, wl->roc_map))
4225					wl12xx_croc(wl, wlvif->role_id);
4226
4227				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4228				if (ret < 0)
4229					goto out;
4230
4231				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4232				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4233					  &wlvif->flags);
4234				wl1271_debug(DEBUG_AP, "stopped AP");
4235			}
4236		}
4237	}
4238
4239	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4240	if (ret < 0)
4241		goto out;
4242
4243	/* Handle HT information change */
4244	if ((changed & BSS_CHANGED_HT) &&
4245	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4246		ret = wl1271_acx_set_ht_information(wl, wlvif,
4247					bss_conf->ht_operation_mode);
4248		if (ret < 0) {
4249			wl1271_warning("Set ht information failed %d", ret);
4250			goto out;
4251		}
4252	}
4253
4254out:
4255	return;
4256}
4257
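/*
 * Apply a new BSSID: refresh the beacon interval and rate policies, stop
 * any scheduled scan on this vif, rebuild the (QoS) null-data templates
 * and set the SSID before marking the vif as in use.
 */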
4258static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4259			    struct ieee80211_vif *vif, u32 sta_rate_set)
4260{
4261	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
4262	u32 rates;
4263	int ret;
4264
4265	wl1271_debug(DEBUG_MAC80211,
4266	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4267	     bss_conf->bssid, vif->cfg.aid,
4268	     bss_conf->beacon_int,
4269	     bss_conf->basic_rates, sta_rate_set);
4270
4271	wlvif->beacon_int = bss_conf->beacon_int;
4272	rates = bss_conf->basic_rates;
4273	wlvif->basic_rate_set =
4274		wl1271_tx_enabled_rates_get(wl, rates,
4275					    wlvif->band);
4276	wlvif->basic_rate =
4277		wl1271_tx_min_rate_get(wl,
4278				       wlvif->basic_rate_set);
4279
4280	if (sta_rate_set)
4281		wlvif->rate_set =
4282			wl1271_tx_enabled_rates_get(wl,
4283						sta_rate_set,
4284						wlvif->band);
4285
4286	/* we only support sched_scan while not connected */
4287	if (wl->sched_vif == wlvif)
4288		wl->ops->sched_scan_stop(wl, wlvif);
4289
4290	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4291	if (ret < 0)
4292		return ret;
4293
4294	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4295	if (ret < 0)
4296		return ret;
4297
4298	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4299	if (ret < 0)
4300		return ret;
4301
4302	wlcore_set_ssid(wl, wlvif);
4303
4304	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4305
4306	return 0;
4307}
4308
4309static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4310{
4311	int ret;
4312
4313	/* revert to the minimum rates for the current band */
4314	wl1271_set_band_rate(wl, wlvif);
4315	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4316
4317	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4318	if (ret < 0)
4319		return ret;
4320
4321	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4322	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4323		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4324		if (ret < 0)
4325			return ret;
4326	}
4327
4328	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4329	return 0;
4330}
4331/* STA/IBSS mode changes */
4332static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4333					struct ieee80211_vif *vif,
4334					struct ieee80211_bss_conf *bss_conf,
4335					u32 changed)
4336{
4337	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4338	bool do_join = false;
4339	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4340	bool ibss_joined = false;
4341	u32 sta_rate_set = 0;
4342	int ret;
4343	struct ieee80211_sta *sta;
4344	bool sta_exists = false;
4345	struct ieee80211_sta_ht_cap sta_ht_cap;
4346
4347	if (is_ibss) {
4348		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4349						     changed);
4350		if (ret < 0)
4351			goto out;
4352	}
4353
4354	if (changed & BSS_CHANGED_IBSS) {
4355		if (vif->cfg.ibss_joined) {
4356			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4357			ibss_joined = true;
4358		} else {
4359			wlcore_unset_assoc(wl, wlvif);
4360			wl12xx_cmd_role_stop_sta(wl, wlvif);
4361		}
4362	}
4363
4364	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4365		do_join = true;
4366
4367	/* Need to update the SSID (for filtering etc) */
4368	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4369		do_join = true;
4370
4371	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4372		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4373			     bss_conf->enable_beacon ? "enabled" : "disabled");
4374
4375		do_join = true;
4376	}
4377
4378	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4379		wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
4380
4381	if (changed & BSS_CHANGED_CQM) {
4382		bool enable = false;
4383		if (bss_conf->cqm_rssi_thold)
4384			enable = true;
4385		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4386						  bss_conf->cqm_rssi_thold,
4387						  bss_conf->cqm_rssi_hyst);
4388		if (ret < 0)
4389			goto out;
4390		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4391	}
4392
4393	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4394		       BSS_CHANGED_ASSOC)) {
4395		rcu_read_lock();
4396		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4397		if (sta) {
4398			u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
4399
4400			/* save the supp_rates of the ap */
4401			sta_rate_set = sta->deflink.supp_rates[wlvif->band];
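			/*
			 * Fold the AP's HT MCS rx_mask bytes (single- and
			 * dual-stream rates) into the extended rate bitmap
			 * above the legacy rates.
			 */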
4402			if (sta->deflink.ht_cap.ht_supported)
4403				sta_rate_set |=
4404					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4405					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4406			sta_ht_cap = sta->deflink.ht_cap;
4407			sta_exists = true;
4408		}
4409
4410		rcu_read_unlock();
4411	}
4412
4413	if (changed & BSS_CHANGED_BSSID) {
4414		if (!is_zero_ether_addr(bss_conf->bssid)) {
4415			ret = wlcore_set_bssid(wl, wlvif, vif,
4416					       sta_rate_set);
4417			if (ret < 0)
4418				goto out;
4419
4420			/* Need to update the BSSID (for filtering etc) */
4421			do_join = true;
4422		} else {
4423			ret = wlcore_clear_bssid(wl, wlvif);
4424			if (ret < 0)
4425				goto out;
4426		}
4427	}
4428
4429	if (changed & BSS_CHANGED_IBSS) {
4430		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4431			     vif->cfg.ibss_joined);
4432
4433		if (vif->cfg.ibss_joined) {
4434			u32 rates = bss_conf->basic_rates;
4435			wlvif->basic_rate_set =
4436				wl1271_tx_enabled_rates_get(wl, rates,
4437							    wlvif->band);
4438			wlvif->basic_rate =
4439				wl1271_tx_min_rate_get(wl,
4440						       wlvif->basic_rate_set);
4441
4442			/* by default, use 11b + OFDM rates */
4443			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4444			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4445			if (ret < 0)
4446				goto out;
4447		}
4448	}
4449
4450	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4451		/* enable beacon filtering */
4452		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4453		if (ret < 0)
4454			goto out;
4455	}
4456
4457	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4458	if (ret < 0)
4459		goto out;
4460
4461	if (do_join) {
4462		ret = wlcore_join(wl, wlvif);
4463		if (ret < 0) {
4464			wl1271_warning("cmd join failed %d", ret);
4465			goto out;
4466		}
4467	}
4468
4469	if (changed & BSS_CHANGED_ASSOC) {
4470		if (vif->cfg.assoc) {
4471			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4472					       sta_rate_set);
4473			if (ret < 0)
4474				goto out;
4475
4476			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4477				wl12xx_set_authorized(wl, wlvif);
4478		} else {
4479			wlcore_unset_assoc(wl, wlvif);
4480		}
4481	}
4482
4483	if (changed & BSS_CHANGED_PS) {
4484		if (vif->cfg.ps &&
4485		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4486		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4487			int ps_mode;
4488			char *ps_mode_str;
4489
4490			if (wl->conf.conn.forced_ps) {
4491				ps_mode = STATION_POWER_SAVE_MODE;
4492				ps_mode_str = "forced";
4493			} else {
4494				ps_mode = STATION_AUTO_PS_MODE;
4495				ps_mode_str = "auto";
4496			}
4497
4498			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4499
4500			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4501			if (ret < 0)
4502				wl1271_warning("enter %s ps failed %d",
4503					       ps_mode_str, ret);
4504		} else if (!vif->cfg.ps &&
4505			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4506			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4507
4508			ret = wl1271_ps_set_mode(wl, wlvif,
4509						 STATION_ACTIVE_MODE);
4510			if (ret < 0)
4511				wl1271_warning("exit auto ps failed %d", ret);
4512		}
4513	}
4514
4515	/* Handle new association with HT. Do this after join. */
4516	if (sta_exists) {
4517		bool enabled =
4518			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4519
4520		ret = wlcore_hw_set_peer_cap(wl,
4521					     &sta_ht_cap,
4522					     enabled,
4523					     wlvif->rate_set,
4524					     wlvif->sta.hlid);
4525		if (ret < 0) {
4526			wl1271_warning("Set ht cap failed %d", ret);
4527			goto out;
4528
4529		}
4530
4531		if (enabled) {
4532			ret = wl1271_acx_set_ht_information(wl, wlvif,
4533						bss_conf->ht_operation_mode);
4534			if (ret < 0) {
4535				wl1271_warning("Set ht information failed %d",
4536					       ret);
4537				goto out;
4538			}
4539		}
4540	}
4541
4542	/* Handle arp filtering. Done after join. */
4543	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4544	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4545		__be32 addr = vif->cfg.arp_addr_list[0];
4546		wlvif->sta.qos = bss_conf->qos;
4547		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4548
4549		if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
4550			wlvif->ip_addr = addr;
4551			/*
4552			 * The template should have been configured only upon
4553			 * association. However, it seems that the correct IP
4554			 * isn't being set (when sending), so we have to
4555			 * reconfigure the template upon every IP change.
4556			 */
4557			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4558			if (ret < 0) {
4559				wl1271_warning("build arp rsp failed: %d", ret);
4560				goto out;
4561			}
4562
4563			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4564				(ACX_ARP_FILTER_ARP_FILTERING |
4565				 ACX_ARP_FILTER_AUTO_ARP),
4566				addr);
4567		} else {
4568			wlvif->ip_addr = 0;
4569			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4570		}
4571
4572		if (ret < 0)
4573			goto out;
4574	}
4575
4576out:
4577	return;
4578}
4579
4580static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4581				       struct ieee80211_vif *vif,
4582				       struct ieee80211_bss_conf *bss_conf,
4583				       u64 changed)
4584{
4585	struct wl1271 *wl = hw->priv;
4586	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4587	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4588	int ret;
4589
4590	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4591		     wlvif->role_id, (int)changed);
4592
4593	/*
4594	 * make sure to cancel pending disconnections if our association
4595	 * state changed
4596	 */
4597	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4598		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4599
4600	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4601	    !bss_conf->enable_beacon)
4602		wl1271_tx_flush(wl);
4603
4604	mutex_lock(&wl->mutex);
4605
4606	if (unlikely(wl->state != WLCORE_STATE_ON))
4607		goto out;
4608
4609	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4610		goto out;
4611
4612	ret = pm_runtime_resume_and_get(wl->dev);
4613	if (ret < 0)
4614		goto out;
4615
4616	if ((changed & BSS_CHANGED_TXPOWER) &&
4617	    bss_conf->txpower != wlvif->power_level) {
4618
4619		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4620		if (ret < 0)
4621			goto out;
4622
4623		wlvif->power_level = bss_conf->txpower;
4624	}
4625
4626	if (is_ap)
4627		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4628	else
4629		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4630
4631	pm_runtime_mark_last_busy(wl->dev);
4632	pm_runtime_put_autosuspend(wl->dev);
4633
4634out:
4635	mutex_unlock(&wl->mutex);
4636}
4637
4638static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4639				 struct ieee80211_chanctx_conf *ctx)
4640{
4641	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4642		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4643		     cfg80211_get_chandef_type(&ctx->def));
4644	return 0;
4645}
4646
4647static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4648				     struct ieee80211_chanctx_conf *ctx)
4649{
4650	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4651		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4652		     cfg80211_get_chandef_type(&ctx->def));
4653}
4654
4655static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4656				     struct ieee80211_chanctx_conf *ctx,
4657				     u32 changed)
4658{
4659	struct wl1271 *wl = hw->priv;
4660	struct wl12xx_vif *wlvif;
4661	int ret;
4662	int channel = ieee80211_frequency_to_channel(
4663		ctx->def.chan->center_freq);
4664
4665	wl1271_debug(DEBUG_MAC80211,
4666		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4667		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4668
4669	mutex_lock(&wl->mutex);
4670
4671	ret = pm_runtime_resume_and_get(wl->dev);
4672	if (ret < 0)
4673		goto out;
4674
4675	wl12xx_for_each_wlvif(wl, wlvif) {
4676		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4677
4678		rcu_read_lock();
4679		if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
4680			rcu_read_unlock();
4681			continue;
4682		}
4683		rcu_read_unlock();
4684
4685		/* start radar if needed */
4686		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4687		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4688		    ctx->radar_enabled && !wlvif->radar_enabled &&
4689		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4690			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4691			wlcore_hw_set_cac(wl, wlvif, true);
4692			wlvif->radar_enabled = true;
4693		}
4694	}
4695
4696	pm_runtime_mark_last_busy(wl->dev);
4697	pm_runtime_put_autosuspend(wl->dev);
4698out:
4699	mutex_unlock(&wl->mutex);
4700}
4701
4702static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4703					struct ieee80211_vif *vif,
4704					struct ieee80211_bss_conf *link_conf,
4705					struct ieee80211_chanctx_conf *ctx)
4706{
4707	struct wl1271 *wl = hw->priv;
4708	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4709	int channel = ieee80211_frequency_to_channel(
4710		ctx->def.chan->center_freq);
4711	int ret = -EINVAL;
4712
4713	wl1271_debug(DEBUG_MAC80211,
4714		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4715		     wlvif->role_id, channel,
4716		     cfg80211_get_chandef_type(&ctx->def),
4717		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4718
4719	mutex_lock(&wl->mutex);
4720
4721	if (unlikely(wl->state != WLCORE_STATE_ON))
4722		goto out;
4723
4724	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4725		goto out;
4726
4727	ret = pm_runtime_resume_and_get(wl->dev);
4728	if (ret < 0)
4729		goto out;
4730
4731	wlvif->band = ctx->def.chan->band;
4732	wlvif->channel = channel;
4733	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4734
4735	/* update default rates according to the band */
4736	wl1271_set_band_rate(wl, wlvif);
4737
4738	if (ctx->radar_enabled &&
4739	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4740		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4741		wlcore_hw_set_cac(wl, wlvif, true);
4742		wlvif->radar_enabled = true;
4743	}
4744
4745	pm_runtime_mark_last_busy(wl->dev);
4746	pm_runtime_put_autosuspend(wl->dev);
4747out:
4748	mutex_unlock(&wl->mutex);
4749
4750	return 0;
4751}
4752
4753static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4754					   struct ieee80211_vif *vif,
4755					   struct ieee80211_bss_conf *link_conf,
4756					   struct ieee80211_chanctx_conf *ctx)
4757{
4758	struct wl1271 *wl = hw->priv;
4759	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4760	int ret;
4761
4762	wl1271_debug(DEBUG_MAC80211,
4763		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4764		     wlvif->role_id,
4765		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4766		     cfg80211_get_chandef_type(&ctx->def));
4767
4768	wl1271_tx_flush(wl);
4769
4770	mutex_lock(&wl->mutex);
4771
4772	if (unlikely(wl->state != WLCORE_STATE_ON))
4773		goto out;
4774
4775	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4776		goto out;
4777
4778	ret = pm_runtime_resume_and_get(wl->dev);
4779	if (ret < 0)
4780		goto out;
4781
4782	if (wlvif->radar_enabled) {
4783		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4784		wlcore_hw_set_cac(wl, wlvif, false);
4785		wlvif->radar_enabled = false;
4786	}
4787
4788	pm_runtime_mark_last_busy(wl->dev);
4789	pm_runtime_put_autosuspend(wl->dev);
4790out:
4791	mutex_unlock(&wl->mutex);
4792}
4793
4794static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4795				    struct wl12xx_vif *wlvif,
4796				    struct ieee80211_chanctx_conf *new_ctx)
4797{
4798	int channel = ieee80211_frequency_to_channel(
4799		new_ctx->def.chan->center_freq);
4800
4801	wl1271_debug(DEBUG_MAC80211,
4802		     "switch vif (role %d) %d -> %d chan_type: %d",
4803		     wlvif->role_id, wlvif->channel, channel,
4804		     cfg80211_get_chandef_type(&new_ctx->def));
4805
4806	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4807		return 0;
4808
4809	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4810
4811	if (wlvif->radar_enabled) {
4812		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4813		wlcore_hw_set_cac(wl, wlvif, false);
4814		wlvif->radar_enabled = false;
4815	}
4816
4817	wlvif->band = new_ctx->def.chan->band;
4818	wlvif->channel = channel;
4819	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4820
4821	/* start radar if needed */
4822	if (new_ctx->radar_enabled) {
4823		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4824		wlcore_hw_set_cac(wl, wlvif, true);
4825		wlvif->radar_enabled = true;
4826	}
4827
4828	return 0;
4829}
4830
4831static int
4832wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4833			     struct ieee80211_vif_chanctx_switch *vifs,
4834			     int n_vifs,
4835			     enum ieee80211_chanctx_switch_mode mode)
4836{
4837	struct wl1271 *wl = hw->priv;
4838	int i, ret;
4839
4840	wl1271_debug(DEBUG_MAC80211,
4841		     "mac80211 switch chanctx n_vifs %d mode %d",
4842		     n_vifs, mode);
4843
4844	mutex_lock(&wl->mutex);
4845
4846	ret = pm_runtime_resume_and_get(wl->dev);
4847	if (ret < 0)
4848		goto out;
4849
4850	for (i = 0; i < n_vifs; i++) {
4851		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4852
4853		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4854		if (ret)
4855			goto out_sleep;
4856	}
4857out_sleep:
4858	pm_runtime_mark_last_busy(wl->dev);
4859	pm_runtime_put_autosuspend(wl->dev);
4860out:
4861	mutex_unlock(&wl->mutex);
4862
4863	return 0;
4864}
4865
4866static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4867			     struct ieee80211_vif *vif,
4868			     unsigned int link_id, u16 queue,
4869			     const struct ieee80211_tx_queue_params *params)
4870{
4871	struct wl1271 *wl = hw->priv;
4872	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4873	u8 ps_scheme;
4874	int ret = 0;
4875
4876	if (wlcore_is_p2p_mgmt(wlvif))
4877		return 0;
4878
4879	mutex_lock(&wl->mutex);
4880
4881	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4882
4883	if (params->uapsd)
4884		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4885	else
4886		ps_scheme = CONF_PS_SCHEME_LEGACY;
4887
4888	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4889		goto out;
4890
4891	ret = pm_runtime_resume_and_get(wl->dev);
4892	if (ret < 0)
4893		goto out;
4894
4895	/*
4896	 * mac80211 configures the txop in units of 32us, but the
4897	 * firmware expects microseconds, hence the shift below
4898	 */
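	/* e.g. a txop of 47 (47 x 32us) is passed down as 47 << 5 = 1504us */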
4899	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4900				params->cw_min, params->cw_max,
4901				params->aifs, params->txop << 5);
4902	if (ret < 0)
4903		goto out_sleep;
4904
4905	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4906				 CONF_CHANNEL_TYPE_EDCF,
4907				 wl1271_tx_get_queue(queue),
4908				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4909				 0, 0);
4910
4911out_sleep:
4912	pm_runtime_mark_last_busy(wl->dev);
4913	pm_runtime_put_autosuspend(wl->dev);
4914
4915out:
4916	mutex_unlock(&wl->mutex);
4917
4918	return ret;
4919}
4920
4921static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4922			     struct ieee80211_vif *vif)
4923{
4924
4925	struct wl1271 *wl = hw->priv;
4926	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4927	u64 mactime = ULLONG_MAX;
4928	int ret;
4929
4930	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4931
4932	mutex_lock(&wl->mutex);
4933
4934	if (unlikely(wl->state != WLCORE_STATE_ON))
4935		goto out;
4936
4937	ret = pm_runtime_resume_and_get(wl->dev);
4938	if (ret < 0)
4939		goto out;
4940
4941	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4942	if (ret < 0)
4943		goto out_sleep;
4944
4945out_sleep:
4946	pm_runtime_mark_last_busy(wl->dev);
4947	pm_runtime_put_autosuspend(wl->dev);
4948
4949out:
4950	mutex_unlock(&wl->mutex);
4951	return mactime;
4952}
4953
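/*
 * Only the current operating channel is reported; the driver keeps no
 * per-channel survey statistics, so nothing beyond the channel is filled.
 */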
4954static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4955				struct survey_info *survey)
4956{
4957	struct ieee80211_conf *conf = &hw->conf;
4958
4959	if (idx != 0)
4960		return -ENOENT;
4961
4962	survey->channel = conf->chandef.chan;
4963	survey->filled = 0;
4964	return 0;
4965}
4966
4967static int wl1271_allocate_sta(struct wl1271 *wl,
4968			     struct wl12xx_vif *wlvif,
4969			     struct ieee80211_sta *sta)
4970{
4971	struct wl1271_station *wl_sta;
4972	int ret;
4973
4974
4975	if (wl->active_sta_count >= wl->max_ap_stations) {
4976		wl1271_warning("could not allocate HLID - too many stations");
4977		return -EBUSY;
4978	}
4979
4980	wl_sta = (struct wl1271_station *)sta->drv_priv;
4981	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4982	if (ret < 0) {
4983		wl1271_warning("could not allocate HLID - too many links");
4984		return -EBUSY;
4985	}
4986
4987	/* use the previous security seq, if this is a recovery/resume */
4988	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4989
4990	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4991	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4992	wl->active_sta_count++;
4993	return 0;
4994}
4995
4996void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4997{
4998	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4999		return;
5000
5001	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5002	__clear_bit(hlid, &wl->ap_ps_map);
5003	__clear_bit(hlid, &wl->ap_fw_ps_map);
5004
5005	/*
5006	 * save the last used PN in the private part of ieee80211_sta,
5007	 * in case of recovery/suspend
5008	 */
5009	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5010
5011	wl12xx_free_link(wl, wlvif, &hlid);
5012	wl->active_sta_count--;
5013
5014	/*
5015	 * rearm the tx watchdog when the last STA is freed - give the FW a
5016	 * chance to return STA-buffered packets before complaining.
5017	 */
5018	if (wl->active_sta_count == 0)
5019		wl12xx_rearm_tx_watchdog_locked(wl);
5020}
5021
5022static int wl12xx_sta_add(struct wl1271 *wl,
5023			  struct wl12xx_vif *wlvif,
5024			  struct ieee80211_sta *sta)
5025{
5026	struct wl1271_station *wl_sta;
5027	int ret = 0;
5028	u8 hlid;
5029
5030	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5031
5032	ret = wl1271_allocate_sta(wl, wlvif, sta);
5033	if (ret < 0)
5034		return ret;
5035
5036	wl_sta = (struct wl1271_station *)sta->drv_priv;
5037	hlid = wl_sta->hlid;
5038
5039	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5040	if (ret < 0)
5041		wl1271_free_sta(wl, wlvif, hlid);
5042
5043	return ret;
5044}
5045
5046static int wl12xx_sta_remove(struct wl1271 *wl,
5047			     struct wl12xx_vif *wlvif,
5048			     struct ieee80211_sta *sta)
5049{
5050	struct wl1271_station *wl_sta;
5051	int ret = 0, id;
5052
5053	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5054
5055	wl_sta = (struct wl1271_station *)sta->drv_priv;
5056	id = wl_sta->hlid;
5057	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5058		return -EINVAL;
5059
5060	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5061	if (ret < 0)
5062		return ret;
5063
5064	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5065	return ret;
5066}
5067
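/*
 * Start a ROC on this vif's role, but only if no role is currently
 * ROCing and this vif's role id is valid.
 */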
5068static void wlcore_roc_if_possible(struct wl1271 *wl,
5069				   struct wl12xx_vif *wlvif)
5070{
5071	if (find_first_bit(wl->roc_map,
5072			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5073		return;
5074
5075	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5076		return;
5077
5078	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5079}
5080
5081/*
5082 * when wl_sta is NULL, we treat this call as if coming from a
5083 * pending auth reply.
5084 * wl->mutex must be taken and the FW must be awake when the call
5085 * takes place.
5086 */
5087void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5088			      struct wl1271_station *wl_sta, bool in_conn)
5089{
5090	if (in_conn) {
5091		if (WARN_ON(wl_sta && wl_sta->in_connection))
5092			return;
5093
5094		if (!wlvif->ap_pending_auth_reply &&
5095		    !wlvif->inconn_count)
5096			wlcore_roc_if_possible(wl, wlvif);
5097
5098		if (wl_sta) {
5099			wl_sta->in_connection = true;
5100			wlvif->inconn_count++;
5101		} else {
5102			wlvif->ap_pending_auth_reply = true;
5103		}
5104	} else {
5105		if (wl_sta && !wl_sta->in_connection)
5106			return;
5107
5108		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5109			return;
5110
5111		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5112			return;
5113
5114		if (wl_sta) {
5115			wl_sta->in_connection = false;
5116			wlvif->inconn_count--;
5117		} else {
5118			wlvif->ap_pending_auth_reply = false;
5119		}
5120
5121		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5122		    test_bit(wlvif->role_id, wl->roc_map))
5123			wl12xx_croc(wl, wlvif->role_id);
5124	}
5125}
5126
5127static int wl12xx_update_sta_state(struct wl1271 *wl,
5128				   struct wl12xx_vif *wlvif,
5129				   struct ieee80211_sta *sta,
5130				   enum ieee80211_sta_state old_state,
5131				   enum ieee80211_sta_state new_state)
5132{
5133	struct wl1271_station *wl_sta;
5134	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5135	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5136	int ret;
5137
5138	wl_sta = (struct wl1271_station *)sta->drv_priv;
5139
5140	/* Add station (AP mode) */
5141	if (is_ap &&
5142	    old_state == IEEE80211_STA_NOTEXIST &&
5143	    new_state == IEEE80211_STA_NONE) {
5144		ret = wl12xx_sta_add(wl, wlvif, sta);
5145		if (ret)
5146			return ret;
5147
5148		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5149	}
5150
5151	/* Remove station (AP mode) */
5152	if (is_ap &&
5153	    old_state == IEEE80211_STA_NONE &&
5154	    new_state == IEEE80211_STA_NOTEXIST) {
5155		/* must not fail */
5156		wl12xx_sta_remove(wl, wlvif, sta);
5157
5158		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5159	}
5160
5161	/* Authorize station (AP mode) */
5162	if (is_ap &&
5163	    new_state == IEEE80211_STA_AUTHORIZED) {
5164		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5165		if (ret < 0)
5166			return ret;
5167
5168		/* reconfigure rates */
5169		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5170		if (ret < 0)
5171			return ret;
5172
5173		ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
5174						     true,
5175						     wl_sta->hlid);
5176		if (ret)
5177			return ret;
5178
5179		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5180	}
5181
5182	/* Authorize station */
5183	if (is_sta &&
5184	    new_state == IEEE80211_STA_AUTHORIZED) {
5185		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5186		ret = wl12xx_set_authorized(wl, wlvif);
5187		if (ret)
5188			return ret;
5189	}
5190
5191	if (is_sta &&
5192	    old_state == IEEE80211_STA_AUTHORIZED &&
5193	    new_state == IEEE80211_STA_ASSOC) {
5194		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5195		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5196	}
5197
5198	/* save seq number on disassoc (suspend) */
5199	if (is_sta &&
5200	    old_state == IEEE80211_STA_ASSOC &&
5201	    new_state == IEEE80211_STA_AUTH) {
5202		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5203		wlvif->total_freed_pkts = 0;
5204	}
5205
5206	/* restore seq number on assoc (resume) */
5207	if (is_sta &&
5208	    old_state == IEEE80211_STA_AUTH &&
5209	    new_state == IEEE80211_STA_ASSOC) {
5210		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5211	}
5212
5213	/* clear ROCs on failure or authorization */
5214	if (is_sta &&
5215	    (new_state == IEEE80211_STA_AUTHORIZED ||
5216	     new_state == IEEE80211_STA_NOTEXIST)) {
5217		if (test_bit(wlvif->role_id, wl->roc_map))
5218			wl12xx_croc(wl, wlvif->role_id);
5219	}
5220
5221	if (is_sta &&
5222	    old_state == IEEE80211_STA_NOTEXIST &&
5223	    new_state == IEEE80211_STA_NONE) {
5224		if (find_first_bit(wl->roc_map,
5225				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5226			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5227			wl12xx_roc(wl, wlvif, wlvif->role_id,
5228				   wlvif->band, wlvif->channel);
5229		}
5230	}
5231	return 0;
5232}
5233
5234static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5235			       struct ieee80211_vif *vif,
5236			       struct ieee80211_sta *sta,
5237			       enum ieee80211_sta_state old_state,
5238			       enum ieee80211_sta_state new_state)
5239{
5240	struct wl1271 *wl = hw->priv;
5241	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5242	int ret;
5243
5244	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5245		     sta->aid, old_state, new_state);
5246
5247	mutex_lock(&wl->mutex);
5248
5249	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5250		ret = -EBUSY;
5251		goto out;
5252	}
5253
5254	ret = pm_runtime_resume_and_get(wl->dev);
5255	if (ret < 0)
5256		goto out;
5257
5258	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5259
5260	pm_runtime_mark_last_busy(wl->dev);
5261	pm_runtime_put_autosuspend(wl->dev);
5262out:
5263	mutex_unlock(&wl->mutex);
5264	if (new_state < old_state)
5265		return 0;
5266	return ret;
5267}
5268
5269static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5270				  struct ieee80211_vif *vif,
5271				  struct ieee80211_ampdu_params *params)
5272{
5273	struct wl1271 *wl = hw->priv;
5274	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5275	int ret;
5276	u8 hlid, *ba_bitmap;
5277	struct ieee80211_sta *sta = params->sta;
5278	enum ieee80211_ampdu_mlme_action action = params->action;
5279	u16 tid = params->tid;
5280	u16 *ssn = &params->ssn;
5281
5282	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5283		     tid);
5284
5285	/* sanity check - the fields in FW are only 8 bits wide */
5286	if (WARN_ON(tid > 0xFF))
5287		return -ENOTSUPP;
5288
5289	mutex_lock(&wl->mutex);
5290
5291	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5292		ret = -EAGAIN;
5293		goto out;
5294	}
5295
5296	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5297		hlid = wlvif->sta.hlid;
5298	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5299		struct wl1271_station *wl_sta;
5300
5301		wl_sta = (struct wl1271_station *)sta->drv_priv;
5302		hlid = wl_sta->hlid;
5303	} else {
5304		ret = -EINVAL;
5305		goto out;
5306	}
5307
5308	ba_bitmap = &wl->links[hlid].ba_bitmap;
5309
5310	ret = pm_runtime_resume_and_get(wl->dev);
5311	if (ret < 0)
5312		goto out;
5313
5314	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5315		     tid, action);
5316
5317	switch (action) {
5318	case IEEE80211_AMPDU_RX_START:
5319		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5320			ret = -ENOTSUPP;
5321			break;
5322		}
5323
5324		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5325			ret = -EBUSY;
5326			wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
5327			break;
5328		}
5329
5330		if (*ba_bitmap & BIT(tid)) {
5331			ret = -EINVAL;
5332			wl1271_error("cannot enable RX BA session on active "
5333				     "tid: %d", tid);
5334			break;
5335		}
5336
5337		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5338				hlid,
5339				params->buf_size);
5340
5341		if (!ret) {
5342			*ba_bitmap |= BIT(tid);
5343			wl->ba_rx_session_count++;
5344		}
5345		break;
5346
5347	case IEEE80211_AMPDU_RX_STOP:
5348		if (!(*ba_bitmap & BIT(tid))) {
5349			/*
5350			 * this happens on reconfig - so only output a debug
5351			 * message for now, and don't fail the function.
5352			 */
5353			wl1271_debug(DEBUG_MAC80211,
5354				     "no active RX BA session on tid: %d",
5355				     tid);
5356			ret = 0;
5357			break;
5358		}
5359
5360		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5361							 hlid, 0);
5362		if (!ret) {
5363			*ba_bitmap &= ~BIT(tid);
5364			wl->ba_rx_session_count--;
5365		}
5366		break;
5367
5368	/*
5369	 * BA initiator (TX) sessions are managed by the FW independently,
5370	 * so all TX AMPDU actions are deliberately rejected here.
5371	 */
5372	case IEEE80211_AMPDU_TX_START:
5373	case IEEE80211_AMPDU_TX_STOP_CONT:
5374	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5375	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5376	case IEEE80211_AMPDU_TX_OPERATIONAL:
5377		ret = -EINVAL;
5378		break;
5379
5380	default:
5381		wl1271_error("Incorrect ampdu action id=%x\n", action);
5382		ret = -EINVAL;
5383	}
5384
5385	pm_runtime_mark_last_busy(wl->dev);
5386	pm_runtime_put_autosuspend(wl->dev);
5387
5388out:
5389	mutex_unlock(&wl->mutex);
5390
5391	return ret;
5392}
5393
5394static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5395				   struct ieee80211_vif *vif,
5396				   const struct cfg80211_bitrate_mask *mask)
5397{
5398	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5399	struct wl1271 *wl = hw->priv;
5400	int i, ret = 0;
5401
5402	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5403		mask->control[NL80211_BAND_2GHZ].legacy,
5404		mask->control[NL80211_BAND_5GHZ].legacy);
5405
5406	mutex_lock(&wl->mutex);
5407
5408	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5409		wlvif->bitrate_masks[i] =
5410			wl1271_tx_enabled_rates_get(wl,
5411						    mask->control[i].legacy,
5412						    i);
5413
5414	if (unlikely(wl->state != WLCORE_STATE_ON))
5415		goto out;
5416
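	/*
	 * Push the new rates to the firmware right away only for a STA that
	 * is not yet associated; otherwise the masks stored above are used
	 * the next time the rate policies are recalculated.
	 */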
5417	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5418	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5419
5420		ret = pm_runtime_resume_and_get(wl->dev);
5421		if (ret < 0)
5422			goto out;
5423
5424		wl1271_set_band_rate(wl, wlvif);
5425		wlvif->basic_rate =
5426			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5427		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5428
5429		pm_runtime_mark_last_busy(wl->dev);
5430		pm_runtime_put_autosuspend(wl->dev);
5431	}
5432out:
5433	mutex_unlock(&wl->mutex);
5434
5435	return ret;
5436}
5437
5438static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5439				     struct ieee80211_vif *vif,
5440				     struct ieee80211_channel_switch *ch_switch)
5441{
5442	struct wl1271 *wl = hw->priv;
5443	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5444	int ret;
5445
5446	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5447
5448	wl1271_tx_flush(wl);
5449
5450	mutex_lock(&wl->mutex);
5451
5452	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5453		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5454			ieee80211_chswitch_done(vif, false);
5455		goto out;
5456	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5457		goto out;
5458	}
5459
5460	ret = pm_runtime_resume_and_get(wl->dev);
5461	if (ret < 0)
5462		goto out;
5463
5464	/* TODO: change mac80211 to pass vif as param */
5465
5466	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5467		unsigned long delay_usec;
5468
5469		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5470		if (ret)
5471			goto out_sleep;
5472
5473		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5474
5475		/* indicate failure 5 seconds after channel switch time */
5476		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5477			ch_switch->count;
5478		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5479					     usecs_to_jiffies(delay_usec) +
5480					     msecs_to_jiffies(5000));
5481	}
5482
5483out_sleep:
5484	pm_runtime_mark_last_busy(wl->dev);
5485	pm_runtime_put_autosuspend(wl->dev);
5486
5487out:
5488	mutex_unlock(&wl->mutex);
5489}
5490
5491static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5492					struct wl12xx_vif *wlvif,
5493					u8 eid)
5494{
5495	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5496	struct sk_buff *beacon =
5497		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
5498
5499	if (!beacon)
5500		return NULL;
5501
5502	return cfg80211_find_ie(eid,
5503				beacon->data + ieoffset,
5504				beacon->len - ieoffset);
5505}
5506
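/*
 * Read the current CSA count from the Channel Switch Announcement IE of
 * our own beacon template.
 */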
5507static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5508				u8 *csa_count)
5509{
5510	const u8 *ie;
5511	const struct ieee80211_channel_sw_ie *ie_csa;
5512
5513	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5514	if (!ie)
5515		return -EINVAL;
5516
5517	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5518	*csa_count = ie_csa->count;
5519
5520	return 0;
5521}
5522
5523static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5524					    struct ieee80211_vif *vif,
5525					    struct cfg80211_chan_def *chandef)
5526{
5527	struct wl1271 *wl = hw->priv;
5528	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5529	struct ieee80211_channel_switch ch_switch = {
5530		.block_tx = true,
5531		.chandef = *chandef,
5532	};
5533	int ret;
5534
5535	wl1271_debug(DEBUG_MAC80211,
5536		     "mac80211 channel switch beacon (role %d)",
5537		     wlvif->role_id);
5538
5539	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5540	if (ret < 0) {
5541		wl1271_error("error getting beacon (for CSA counter)");
5542		return;
5543	}
5544
5545	mutex_lock(&wl->mutex);
5546
5547	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5548		ret = -EBUSY;
5549		goto out;
5550	}
5551
5552	ret = pm_runtime_resume_and_get(wl->dev);
5553	if (ret < 0)
5554		goto out;
5555
5556	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5557	if (ret)
5558		goto out_sleep;
5559
5560	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5561
5562out_sleep:
5563	pm_runtime_mark_last_busy(wl->dev);
5564	pm_runtime_put_autosuspend(wl->dev);
5565out:
5566	mutex_unlock(&wl->mutex);
5567}
5568
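/*
 * mac80211 flush callback: the vif/queues/drop arguments are ignored and
 * all pending frames are flushed.
 */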
5569static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5570			    u32 queues, bool drop)
5571{
5572	struct wl1271 *wl = hw->priv;
5573
5574	wl1271_tx_flush(wl);
5575}
5576
5577static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5578				       struct ieee80211_vif *vif,
5579				       struct ieee80211_channel *chan,
5580				       int duration,
5581				       enum ieee80211_roc_type type)
5582{
5583	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5584	struct wl1271 *wl = hw->priv;
5585	int channel, active_roc, ret = 0;
5586
5587	channel = ieee80211_frequency_to_channel(chan->center_freq);
5588
5589	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5590		     channel, wlvif->role_id);
5591
5592	mutex_lock(&wl->mutex);
5593
5594	if (unlikely(wl->state != WLCORE_STATE_ON))
5595		goto out;
5596
5597	/* return EBUSY if we can't ROC right now */
5598	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5599	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5600		wl1271_warning("active roc on role %d", active_roc);
5601		ret = -EBUSY;
5602		goto out;
5603	}
5604
5605	ret = pm_runtime_resume_and_get(wl->dev);
5606	if (ret < 0)
5607		goto out;
5608
5609	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5610	if (ret < 0)
5611		goto out_sleep;
5612
5613	wl->roc_vif = vif;
5614	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5615				     msecs_to_jiffies(duration));
5616out_sleep:
5617	pm_runtime_mark_last_busy(wl->dev);
5618	pm_runtime_put_autosuspend(wl->dev);
5619out:
5620	mutex_unlock(&wl->mutex);
5621	return ret;
5622}
5623
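/*
 * Finish a pending ROC: stop the device role and clear roc_vif. Callers
 * hold wl->mutex and keep the FW awake.
 */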
5624static int __wlcore_roc_completed(struct wl1271 *wl)
5625{
5626	struct wl12xx_vif *wlvif;
5627	int ret;
5628
5629	/* already completed */
5630	if (unlikely(!wl->roc_vif))
5631		return 0;
5632
5633	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5634
5635	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5636		return -EBUSY;
5637
5638	ret = wl12xx_stop_dev(wl, wlvif);
5639	if (ret < 0)
5640		return ret;
5641
5642	wl->roc_vif = NULL;
5643
5644	return 0;
5645}
5646
5647static int wlcore_roc_completed(struct wl1271 *wl)
5648{
5649	int ret;
5650
5651	wl1271_debug(DEBUG_MAC80211, "roc complete");
5652
5653	mutex_lock(&wl->mutex);
5654
5655	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5656		ret = -EBUSY;
5657		goto out;
5658	}
5659
5660	ret = pm_runtime_resume_and_get(wl->dev);
5661	if (ret < 0)
5662		goto out;
5663
5664	ret = __wlcore_roc_completed(wl);
5665
5666	pm_runtime_mark_last_busy(wl->dev);
5667	pm_runtime_put_autosuspend(wl->dev);
5668out:
5669	mutex_unlock(&wl->mutex);
5670
5671	return ret;
5672}
5673
5674static void wlcore_roc_complete_work(struct work_struct *work)
5675{
5676	struct delayed_work *dwork;
5677	struct wl1271 *wl;
5678	int ret;
5679
5680	dwork = to_delayed_work(work);
5681	wl = container_of(dwork, struct wl1271, roc_complete_work);
5682
5683	ret = wlcore_roc_completed(wl);
5684	if (!ret)
5685		ieee80211_remain_on_channel_expired(wl->hw);
5686}
5687
5688static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5689					      struct ieee80211_vif *vif)
5690{
5691	struct wl1271 *wl = hw->priv;
5692
5693	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5694
5695	/* TODO: per-vif */
5696	wl1271_tx_flush(wl);
5697
5698	/*
5699	 * we can't just flush_work here, because it might deadlock
5700	 * (as we might get called from the same workqueue)
5701	 */
5702	cancel_delayed_work_sync(&wl->roc_complete_work);
5703	wlcore_roc_completed(wl);
5704
5705	return 0;
5706}
5707
5708static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5709				    struct ieee80211_vif *vif,
5710				    struct ieee80211_sta *sta,
5711				    u32 changed)
5712{
5713	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5714
5715	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5716
5717	if (!(changed & IEEE80211_RC_BW_CHANGED))
5718		return;
5719
5720	/* this callback is atomic, so schedule a new work */
5721	wlvif->rc_update_bw = sta->deflink.bandwidth;
5722	memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
5723	       sizeof(sta->deflink.ht_cap));
5724	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5725}
5726
5727static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5728				     struct ieee80211_vif *vif,
5729				     struct ieee80211_sta *sta,
5730				     struct station_info *sinfo)
5731{
5732	struct wl1271 *wl = hw->priv;
5733	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5734	s8 rssi_dbm;
5735	int ret;
5736
5737	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5738
5739	mutex_lock(&wl->mutex);
5740
5741	if (unlikely(wl->state != WLCORE_STATE_ON))
5742		goto out;
5743
5744	ret = pm_runtime_resume_and_get(wl->dev);
5745	if (ret < 0)
5746		goto out_sleep;
5747
5748	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5749	if (ret < 0)
5750		goto out_sleep;
5751
5752	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5753	sinfo->signal = rssi_dbm;
5754
5755out_sleep:
5756	pm_runtime_mark_last_busy(wl->dev);
5757	pm_runtime_put_autosuspend(wl->dev);
5758
5759out:
5760	mutex_unlock(&wl->mutex);
5761}
5762
5763static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5764					     struct ieee80211_sta *sta)
5765{
5766	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5767	struct wl1271 *wl = hw->priv;
5768	u8 hlid = wl_sta->hlid;
5769
5770	/* return in units of Kbps */
5771	return (wl->links[hlid].fw_rate_mbps * 1000);
5772}
5773
5774static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5775{
5776	struct wl1271 *wl = hw->priv;
5777	bool ret = false;
5778
5779	mutex_lock(&wl->mutex);
5780
5781	if (unlikely(wl->state != WLCORE_STATE_ON))
5782		goto out;
5783
5784	/* packets are considered pending if in the TX queue or the FW */
5785	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5786out:
5787	mutex_unlock(&wl->mutex);
5788
5789	return ret;
5790}
5791
5792/* can't be const, mac80211 writes to this */
5793static struct ieee80211_rate wl1271_rates[] = {
5794	{ .bitrate = 10,
5795	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5796	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5797	{ .bitrate = 20,
5798	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5799	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5800	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5801	{ .bitrate = 55,
5802	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5803	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5804	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5805	{ .bitrate = 110,
5806	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5807	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5808	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5809	{ .bitrate = 60,
5810	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5811	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5812	{ .bitrate = 90,
5813	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5814	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5815	{ .bitrate = 120,
5816	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5817	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5818	{ .bitrate = 180,
5819	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5820	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5821	{ .bitrate = 240,
5822	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5823	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5824	{ .bitrate = 360,
5825	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5826	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5827	{ .bitrate = 480,
5828	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5829	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5830	{ .bitrate = 540,
5831	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5832	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5833};
5834
5835/* can't be const, mac80211 writes to this */
5836static struct ieee80211_channel wl1271_channels[] = {
5837	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5838	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5839	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5840	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5841	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5842	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5843	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5844	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5845	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5846	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5847	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5848	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5849	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5850	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5851};
5852
5853/* can't be const, mac80211 writes to this */
5854static struct ieee80211_supported_band wl1271_band_2ghz = {
5855	.channels = wl1271_channels,
5856	.n_channels = ARRAY_SIZE(wl1271_channels),
5857	.bitrates = wl1271_rates,
5858	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5859};
5860
5861/* 5 GHz data rates for WL1273 */
5862static struct ieee80211_rate wl1271_rates_5ghz[] = {
5863	{ .bitrate = 60,
5864	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5865	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5866	{ .bitrate = 90,
5867	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5868	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5869	{ .bitrate = 120,
5870	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5871	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5872	{ .bitrate = 180,
5873	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5874	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5875	{ .bitrate = 240,
5876	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5877	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5878	{ .bitrate = 360,
5879	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5880	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5881	{ .bitrate = 480,
5882	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5883	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5884	{ .bitrate = 540,
5885	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5886	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5887};
5888
5889/* 5 GHz band channels for WL1273 */
5890static struct ieee80211_channel wl1271_channels_5ghz[] = {
5891	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5892	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5893	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5894	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5895	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5896	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5897	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5898	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5899	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5900	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5901	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5902	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5903	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5904	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5905	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5906	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5907	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5908	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5909	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5910	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5911	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5912	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5913	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5914	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5915	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5916	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5917	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5918	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5919	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5920	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5921	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5922};
5923
5924static struct ieee80211_supported_band wl1271_band_5ghz = {
5925	.channels = wl1271_channels_5ghz,
5926	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5927	.bitrates = wl1271_rates_5ghz,
5928	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5929};
5930
5931static const struct ieee80211_ops wl1271_ops = {
5932	.start = wl1271_op_start,
5933	.stop = wlcore_op_stop,
5934	.add_interface = wl1271_op_add_interface,
5935	.remove_interface = wl1271_op_remove_interface,
5936	.change_interface = wl12xx_op_change_interface,
5937#ifdef CONFIG_PM
5938	.suspend = wl1271_op_suspend,
5939	.resume = wl1271_op_resume,
5940#endif
5941	.config = wl1271_op_config,
5942	.prepare_multicast = wl1271_op_prepare_multicast,
5943	.configure_filter = wl1271_op_configure_filter,
5944	.tx = wl1271_op_tx,
5945	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
5946	.set_key = wlcore_op_set_key,
5947	.hw_scan = wl1271_op_hw_scan,
5948	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5949	.sched_scan_start = wl1271_op_sched_scan_start,
5950	.sched_scan_stop = wl1271_op_sched_scan_stop,
5951	.bss_info_changed = wl1271_op_bss_info_changed,
5952	.set_frag_threshold = wl1271_op_set_frag_threshold,
5953	.set_rts_threshold = wl1271_op_set_rts_threshold,
5954	.conf_tx = wl1271_op_conf_tx,
5955	.get_tsf = wl1271_op_get_tsf,
5956	.get_survey = wl1271_op_get_survey,
5957	.sta_state = wl12xx_op_sta_state,
5958	.ampdu_action = wl1271_op_ampdu_action,
5959	.tx_frames_pending = wl1271_tx_frames_pending,
5960	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5961	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5962	.channel_switch = wl12xx_op_channel_switch,
5963	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
5964	.flush = wlcore_op_flush,
5965	.remain_on_channel = wlcore_op_remain_on_channel,
5966	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5967	.add_chanctx = wlcore_op_add_chanctx,
5968	.remove_chanctx = wlcore_op_remove_chanctx,
5969	.change_chanctx = wlcore_op_change_chanctx,
5970	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5971	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5972	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5973	.sta_rc_update = wlcore_op_sta_rc_update,
5974	.sta_statistics = wlcore_op_sta_statistics,
5975	.get_expected_throughput = wlcore_op_get_expected_throughput,
5976	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5977};
5978
5979
5980u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5981{
5982	u8 idx;
5983
5984	BUG_ON(band >= 2);
5985
5986	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5987		wl1271_error("Illegal RX rate from HW: %d", rate);
5988		return 0;
5989	}
5990
5991	idx = wl->band_rate_to_idx[band][rate];
5992	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5993		wl1271_error("Unsupported RX rate from HW: %d", rate);
5994		return 0;
5995	}
5996
5997	return idx;
5998}
5999
6000static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6001{
6002	int i;
6003
6004	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6005		     oui, nic);
6006
6007	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6008		wl1271_warning("NIC part of the MAC address wraps around!");
6009
6010	for (i = 0; i < wl->num_mac_addr; i++) {
6011		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6012		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6013		wl->addresses[i].addr[2] = (u8) oui;
6014		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6015		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6016		wl->addresses[i].addr[5] = (u8) nic;
6017		nic++;
6018	}
6019
6020	/* at most, we may be one address short */
6021	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6022
6023	/*
6024	 * turn on the LAA bit in the first address and use it as
6025	 * the last address.
6026	 */
6027	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6028		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6029		memcpy(&wl->addresses[idx], &wl->addresses[0],
6030		       sizeof(wl->addresses[0]));
6031		/* LAA bit */
6032		wl->addresses[idx].addr[0] |= BIT(1);
6033	}
6034
6035	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6036	wl->hw->wiphy->addresses = wl->addresses;
6037}
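
/*
 * Worked example (hypothetical values, for illustration only): with
 * oui = 0x0017e9 and nic = 0x000001, the loop above yields
 * 00:17:e9:00:00:01, 00:17:e9:00:00:02, and so on. If the chip fused
 * one address fewer than WLCORE_NUM_MAC_ADDRESSES, the last slot is a
 * copy of the first address with the locally-administered bit set,
 * i.e. 02:17:e9:00:00:01.
 */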
6038
6039static int wl12xx_get_hw_info(struct wl1271 *wl)
6040{
6041	int ret;
6042
6043	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6044	if (ret < 0)
6045		goto out;
6046
6047	wl->fuse_oui_addr = 0;
6048	wl->fuse_nic_addr = 0;
6049
6050	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6051	if (ret < 0)
6052		goto out;
6053
6054	if (wl->ops->get_mac)
6055		ret = wl->ops->get_mac(wl);
6056
6057out:
6058	return ret;
6059}
6060
6061static int wl1271_register_hw(struct wl1271 *wl)
6062{
6063	int ret;
6064	u32 oui_addr = 0, nic_addr = 0;
6065	struct platform_device *pdev = wl->pdev;
6066	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6067
6068	if (wl->mac80211_registered)
6069		return 0;
6070
6071	if (wl->nvs_len >= 12) {
6072		/* NOTE: the wl->nvs->nvs element must be first; to
6073		 * simplify the casting, we assume it is at the
6074		 * beginning of the wl->nvs structure.
6075		 */
6076		u8 *nvs_ptr = (u8 *)wl->nvs;
6077
6078		oui_addr =
6079			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6080		nic_addr =
6081			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6082	}
6083
6084	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6085	if (oui_addr == 0 && nic_addr == 0) {
6086		oui_addr = wl->fuse_oui_addr;
6087		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6088		nic_addr = wl->fuse_nic_addr + 1;
6089	}
6090
6091	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6092		wl1271_warning("Detected unconfigured mac address in nvs, deriving from fuse instead.");
6093		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6094			wl1271_warning("This default nvs file can be removed from the file system");
6095		} else {
6096			wl1271_warning("Your device performance is not optimized.");
6097			wl1271_warning("Please use the calibrator tool to configure your device.");
6098		}
6099
6100		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6101			wl1271_warning("Fuse mac address is zero, using random mac");
6102			/* Use TI oui and a random nic */
6103			oui_addr = WLCORE_TI_OUI_ADDRESS;
6104			nic_addr = get_random_u32();
6105		} else {
6106			oui_addr = wl->fuse_oui_addr;
6107			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6108			nic_addr = wl->fuse_nic_addr + 1;
6109		}
6110	}
6111
6112	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6113
6114	ret = ieee80211_register_hw(wl->hw);
6115	if (ret < 0) {
6116		wl1271_error("unable to register mac80211 hw: %d", ret);
6117		goto out;
6118	}
6119
6120	wl->mac80211_registered = true;
6121
6122	wl1271_debugfs_init(wl);
6123
6124	wl1271_notice("loaded");
6125
6126out:
6127	return ret;
6128}
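
/*
 * Illustrative sketch (not part of the driver): with hypothetical NVS
 * bytes nvs[3..6] = { 0x01, 0x00, 0x00, 0xe9 } and nvs[10..11] =
 * { 0x17, 0x00 }, the extraction above gives oui = 0x0017e9 and
 * nic = 0x000001, i.e. a base MAC address of 00:17:e9:00:00:01.
 */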
6129
6130static void wl1271_unregister_hw(struct wl1271 *wl)
6131{
6132	if (wl->plt)
6133		wl1271_plt_stop(wl);
6134
6135	ieee80211_unregister_hw(wl->hw);
6136	wl->mac80211_registered = false;
6137
6138}
6139
6140static int wl1271_init_ieee80211(struct wl1271 *wl)
6141{
6142	int i;
6143	static const u32 cipher_suites[] = {
6144		WLAN_CIPHER_SUITE_WEP40,
6145		WLAN_CIPHER_SUITE_WEP104,
6146		WLAN_CIPHER_SUITE_TKIP,
6147		WLAN_CIPHER_SUITE_CCMP,
6148		WL1271_CIPHER_SUITE_GEM,
6149	};
6150
6151	/* The tx descriptor buffer */
6152	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6153
6154	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6155		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6156
6157	/* unit us */
6158	/* FIXME: find a proper value */
6159	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6160
6161	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6162	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6163	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6164	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6165	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6166	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6167	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6168	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6169	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6170	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6171	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6172	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6173	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6174	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6175	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6176
6177	wl->hw->wiphy->cipher_suites = cipher_suites;
6178	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6179
6180	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6181					 BIT(NL80211_IFTYPE_AP) |
6182					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6183					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6184#ifdef CONFIG_MAC80211_MESH
6185					 BIT(NL80211_IFTYPE_MESH_POINT) |
6186#endif
6187					 BIT(NL80211_IFTYPE_P2P_GO);
6188
6189	wl->hw->wiphy->max_scan_ssids = 1;
6190	wl->hw->wiphy->max_sched_scan_ssids = 16;
6191	wl->hw->wiphy->max_match_sets = 16;
6192	/*
6193	 * The maximum length of the elements in scanning probe request
6194	 * templates is the maximum possible template length, minus the
6195	 * IEEE80211 header of the template.
6196	 */
6197	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6198			sizeof(struct ieee80211_header);
6199
6200	wl->hw->wiphy->max_sched_scan_reqs = 1;
6201	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6202		sizeof(struct ieee80211_header);
6203
6204	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6205
6206	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6207				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6208				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6209				WIPHY_FLAG_IBSS_RSN;
6210
6211	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6212
6213	/* make sure all our channels fit in the scanned_ch bitmask */
6214	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6215		     ARRAY_SIZE(wl1271_channels_5ghz) >
6216		     WL1271_MAX_CHANNELS);
6217	/*
6218	 * clear channel flags from the previous usage
6219	 * and restore max_power & max_antenna_gain values.
6220	 */
6221	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6222		wl1271_band_2ghz.channels[i].flags = 0;
6223		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6224		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6225	}
6226
6227	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6228		wl1271_band_5ghz.channels[i].flags = 0;
6229		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6230		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6231	}
6232
6233	/*
6234	 * We keep local copies of the band structs because we need to
6235	 * modify them on a per-device basis.
6236	 */
6237	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6238	       sizeof(wl1271_band_2ghz));
6239	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6240	       &wl->ht_cap[NL80211_BAND_2GHZ],
6241	       sizeof(*wl->ht_cap));
6242	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6243	       sizeof(wl1271_band_5ghz));
6244	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6245	       &wl->ht_cap[NL80211_BAND_5GHZ],
6246	       sizeof(*wl->ht_cap));
6247
6248	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6249		&wl->bands[NL80211_BAND_2GHZ];
6250	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6251		&wl->bands[NL80211_BAND_5GHZ];
6252
6253	/*
6254	 * allow 4 queues per mac address we support, plus
6255	 * 1 cab queue per mac, plus one global offchannel Tx queue
6256	 */
6257	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6258
6259	/* the last queue is the offchannel queue */
6260	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6261	wl->hw->max_rates = 1;
6262
6263	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6264
6265	/* the FW answers probe-requests in AP-mode */
6266	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6267	wl->hw->wiphy->probe_resp_offload =
6268		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6269		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6270		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6271
6272	/* allowed interface combinations */
6273	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6274	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6275
6276	/* register vendor commands */
6277	wlcore_set_vendor_commands(wl->hw->wiphy);
6278
6279	SET_IEEE80211_DEV(wl->hw, wl->dev);
6280
6281	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6282	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6283
6284	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6285
6286	return 0;
6287}
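
/*
 * Worked example (constants assumed, not defined in this file): with
 * NUM_TX_QUEUES = 4 and WLCORE_NUM_MAC_ADDRESSES = 3, the queue
 * calculation above gives (4 + 1) * 3 + 1 = 16 hw queues, and queue 15
 * is used as the global off-channel queue.
 */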
6288
6289struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6290				     u32 mbox_size)
6291{
6292	struct ieee80211_hw *hw;
6293	struct wl1271 *wl;
6294	int i, j, ret;
6295	unsigned int order;
6296
6297	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6298	if (!hw) {
6299		wl1271_error("could not alloc ieee80211_hw");
6300		ret = -ENOMEM;
6301		goto err_hw_alloc;
6302	}
6303
6304	wl = hw->priv;
6305	memset(wl, 0, sizeof(*wl));
6306
6307	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6308	if (!wl->priv) {
6309		wl1271_error("could not alloc wl priv");
6310		ret = -ENOMEM;
6311		goto err_priv_alloc;
6312	}
6313
6314	INIT_LIST_HEAD(&wl->wlvif_list);
6315
6316	wl->hw = hw;
6317
6318	/*
6319	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6320	 * We don't allocate any additional resources here, so that's fine.
6321	 */
6322	for (i = 0; i < NUM_TX_QUEUES; i++)
6323		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6324			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6325
6326	skb_queue_head_init(&wl->deferred_rx_queue);
6327	skb_queue_head_init(&wl->deferred_tx_queue);
6328
6329	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6330	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6331	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6332	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6333	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6334	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6335
6336	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6337	if (!wl->freezable_wq) {
6338		ret = -ENOMEM;
6339		goto err_hw;
6340	}
6341
6342	wl->channel = 0;
6343	wl->rx_counter = 0;
6344	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6345	wl->band = NL80211_BAND_2GHZ;
6346	wl->channel_type = NL80211_CHAN_NO_HT;
6347	wl->flags = 0;
6348	wl->sg_enabled = true;
6349	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6350	wl->recovery_count = 0;
6351	wl->hw_pg_ver = -1;
6352	wl->ap_ps_map = 0;
6353	wl->ap_fw_ps_map = 0;
6354	wl->quirks = 0;
6355	wl->system_hlid = WL12XX_SYSTEM_HLID;
6356	wl->active_sta_count = 0;
6357	wl->active_link_count = 0;
6358	wl->fwlog_size = 0;
6359
6360	/* The system link is always allocated */
6361	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6362
6363	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6364	for (i = 0; i < wl->num_tx_desc; i++)
6365		wl->tx_frames[i] = NULL;
6366
6367	spin_lock_init(&wl->wl_lock);
6368
6369	wl->state = WLCORE_STATE_OFF;
6370	wl->fw_type = WL12XX_FW_TYPE_NONE;
6371	mutex_init(&wl->mutex);
6372	mutex_init(&wl->flush_mutex);
6373	init_completion(&wl->nvs_loading_complete);
6374
6375	order = get_order(aggr_buf_size);
6376	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6377	if (!wl->aggr_buf) {
6378		ret = -ENOMEM;
6379		goto err_wq;
6380	}
6381	wl->aggr_buf_size = aggr_buf_size;
6382
6383	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6384	if (!wl->dummy_packet) {
6385		ret = -ENOMEM;
6386		goto err_aggr;
6387	}
6388
6389	/* Allocate one page for the FW log */
6390	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6391	if (!wl->fwlog) {
6392		ret = -ENOMEM;
6393		goto err_dummy_packet;
6394	}
6395
6396	wl->mbox_size = mbox_size;
6397	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6398	if (!wl->mbox) {
6399		ret = -ENOMEM;
6400		goto err_fwlog;
6401	}
6402
6403	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6404	if (!wl->buffer_32) {
6405		ret = -ENOMEM;
6406		goto err_mbox;
6407	}
6408
6409	return hw;
6410
6411err_mbox:
6412	kfree(wl->mbox);
6413
6414err_fwlog:
6415	free_page((unsigned long)wl->fwlog);
6416
6417err_dummy_packet:
6418	dev_kfree_skb(wl->dummy_packet);
6419
6420err_aggr:
6421	free_pages((unsigned long)wl->aggr_buf, order);
6422
6423err_wq:
6424	destroy_workqueue(wl->freezable_wq);
6425
6426err_hw:
6427	wl1271_debugfs_exit(wl);
6428	kfree(wl->priv);
6429
6430err_priv_alloc:
6431	ieee80211_free_hw(hw);
6432
6433err_hw_alloc:
6434
6435	return ERR_PTR(ret);
6436}
6437EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6438
6439int wlcore_free_hw(struct wl1271 *wl)
6440{
6441	/* Unblock any fwlog readers */
6442	mutex_lock(&wl->mutex);
6443	wl->fwlog_size = -1;
6444	mutex_unlock(&wl->mutex);
6445
6446	wlcore_sysfs_free(wl);
6447
6448	kfree(wl->buffer_32);
6449	kfree(wl->mbox);
6450	free_page((unsigned long)wl->fwlog);
6451	dev_kfree_skb(wl->dummy_packet);
6452	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6453
6454	wl1271_debugfs_exit(wl);
6455
6456	vfree(wl->fw);
6457	wl->fw = NULL;
6458	wl->fw_type = WL12XX_FW_TYPE_NONE;
6459	kfree(wl->nvs);
6460	wl->nvs = NULL;
6461
6462	kfree(wl->raw_fw_status);
6463	kfree(wl->fw_status);
6464	kfree(wl->tx_res_if);
6465	destroy_workqueue(wl->freezable_wq);
6466
6467	kfree(wl->priv);
6468	ieee80211_free_hw(wl->hw);
6469
6470	return 0;
6471}
6472EXPORT_SYMBOL_GPL(wlcore_free_hw);
6473
6474#ifdef CONFIG_PM
6475static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6476	.flags = WIPHY_WOWLAN_ANY,
6477	.n_patterns = WL1271_MAX_RX_FILTERS,
6478	.pattern_min_len = 1,
6479	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6480};
6481#endif
6482
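/*
 * Primary (hard) IRQ handler: it only wakes the threaded handler.
 * It is installed below for edge-triggered interrupt lines; for
 * level-triggered lines no primary handler is used and IRQF_ONESHOT
 * keeps the line masked until the threaded handler completes.
 */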
6483static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6484{
6485	return IRQ_WAKE_THREAD;
6486}
6487
6488static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6489{
6490	struct wl1271 *wl = context;
6491	struct platform_device *pdev = wl->pdev;
6492	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6493	struct resource *res;
6494
6495	int ret;
6496	irq_handler_t hardirq_fn = NULL;
6497
6498	if (fw) {
6499		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6500		if (!wl->nvs) {
6501			wl1271_error("Could not allocate nvs data");
6502			goto out;
6503		}
6504		wl->nvs_len = fw->size;
6505	} else if (pdev_data->family->nvs_name) {
6506		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6507			     pdev_data->family->nvs_name);
6508		wl->nvs = NULL;
6509		wl->nvs_len = 0;
6510	} else {
6511		wl->nvs = NULL;
6512		wl->nvs_len = 0;
6513	}
6514
6515	ret = wl->ops->setup(wl);
6516	if (ret < 0)
6517		goto out_free_nvs;
6518
6519	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6520
6521	/* adjust some runtime configuration parameters */
6522	wlcore_adjust_conf(wl);
6523
6524	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6525	if (!res) {
6526		wl1271_error("Could not get IRQ resource");
6527		goto out_free_nvs;
6528	}
6529
6530	wl->irq = res->start;
6531	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6532	wl->if_ops = pdev_data->if_ops;
6533
6534	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6535		hardirq_fn = wlcore_hardirq;
6536	else
6537		wl->irq_flags |= IRQF_ONESHOT;
6538
6539	ret = wl12xx_set_power_on(wl);
6540	if (ret < 0)
6541		goto out_free_nvs;
6542
6543	ret = wl12xx_get_hw_info(wl);
6544	if (ret < 0) {
6545		wl1271_error("couldn't get hw info");
6546		wl1271_power_off(wl);
6547		goto out_free_nvs;
6548	}
6549
6550	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6551				   wl->irq_flags, pdev->name, wl);
6552	if (ret < 0) {
6553		wl1271_error("interrupt configuration failed");
6554		wl1271_power_off(wl);
6555		goto out_free_nvs;
6556	}
6557
6558#ifdef CONFIG_PM
6559	device_init_wakeup(wl->dev, true);
6560
6561	ret = enable_irq_wake(wl->irq);
6562	if (!ret) {
6563		wl->irq_wake_enabled = true;
6564		if (pdev_data->pwr_in_suspend)
6565			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6566	}
6567
6568	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6569	if (res) {
6570		wl->wakeirq = res->start;
6571		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6572		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6573		if (ret)
6574			wl->wakeirq = -ENODEV;
6575	} else {
6576		wl->wakeirq = -ENODEV;
6577	}
6578#endif
6579	disable_irq(wl->irq);
6580	wl1271_power_off(wl);
6581
6582	ret = wl->ops->identify_chip(wl);
6583	if (ret < 0)
6584		goto out_irq;
6585
6586	ret = wl1271_init_ieee80211(wl);
6587	if (ret)
6588		goto out_irq;
6589
6590	ret = wl1271_register_hw(wl);
6591	if (ret)
6592		goto out_irq;
6593
6594	ret = wlcore_sysfs_init(wl);
6595	if (ret)
6596		goto out_unreg;
6597
6598	wl->initialized = true;
6599	goto out;
6600
6601out_unreg:
6602	wl1271_unregister_hw(wl);
6603
6604out_irq:
6605	if (wl->wakeirq >= 0)
6606		dev_pm_clear_wake_irq(wl->dev);
6607	device_init_wakeup(wl->dev, false);
6608	free_irq(wl->irq, wl);
6609
6610out_free_nvs:
6611	kfree(wl->nvs);
6612
6613out:
6614	release_firmware(fw);
6615	complete_all(&wl->nvs_loading_complete);
6616}
6617
6618static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6619{
6620	struct wl1271 *wl = dev_get_drvdata(dev);
6621	struct wl12xx_vif *wlvif;
6622	int error;
6623
6624	/* We do not enter elp sleep in PLT mode */
6625	if (wl->plt)
6626		return 0;
6627
6628	/* Nothing to do if no ELP mode requested */
6629	if (wl->sleep_auth != WL1271_PSM_ELP)
6630		return 0;
6631
6632	wl12xx_for_each_wlvif(wl, wlvif) {
6633		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6634		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6635			return -EBUSY;
6636	}
6637
6638	wl1271_debug(DEBUG_PSM, "chip to elp");
6639	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6640	if (error < 0) {
6641		wl12xx_queue_recovery_work(wl);
6642
6643		return error;
6644	}
6645
6646	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6647
6648	return 0;
6649}
6650
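/*
 * Waking from ELP is a handshake with the interrupt path: after the
 * ELPCTRL_WAKE_UP write below, the chip raises an interrupt and the
 * IRQ handler completes wl->elp_compl; if that does not happen within
 * WL1271_WAKEUP_TIMEOUT ms, recovery is queued.
 */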
6651static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6652{
6653	struct wl1271 *wl = dev_get_drvdata(dev);
6654	DECLARE_COMPLETION_ONSTACK(compl);
6655	unsigned long flags;
6656	int ret;
6657	unsigned long start_time = jiffies;
6658	bool recovery = false;
6659
6660	/* Nothing to do if no ELP mode requested */
6661	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6662		return 0;
6663
6664	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6665
6666	spin_lock_irqsave(&wl->wl_lock, flags);
6667	wl->elp_compl = &compl;
6668	spin_unlock_irqrestore(&wl->wl_lock, flags);
6669
6670	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6671	if (ret < 0) {
6672		recovery = true;
6673	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6674		ret = wait_for_completion_timeout(&compl,
6675			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6676		if (ret == 0) {
6677			wl1271_warning("ELP wakeup timeout!");
6678			recovery = true;
6679		}
6680	}
6681
6682	spin_lock_irqsave(&wl->wl_lock, flags);
6683	wl->elp_compl = NULL;
6684	spin_unlock_irqrestore(&wl->wl_lock, flags);
6685	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6686
6687	if (recovery) {
6688		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6689		wl12xx_queue_recovery_work(wl);
6690	} else {
6691		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6692			     jiffies_to_msecs(jiffies - start_time));
6693	}
6694
6695	return 0;
6696}
6697
6698static const struct dev_pm_ops wlcore_pm_ops = {
6699	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6700			   wlcore_runtime_resume,
6701			   NULL)
6702};
6703
6704int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6705{
6706	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6707	const char *nvs_name;
6708	int ret = 0;
6709
6710	if (!wl->ops || !wl->ptable || !pdev_data)
6711		return -EINVAL;
6712
6713	wl->dev = &pdev->dev;
6714	wl->pdev = pdev;
6715	platform_set_drvdata(pdev, wl);
6716
6717	if (pdev_data->family && pdev_data->family->nvs_name) {
6718		nvs_name = pdev_data->family->nvs_name;
6719		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
6720					      nvs_name, &pdev->dev, GFP_KERNEL,
6721					      wl, wlcore_nvs_cb);
6722		if (ret < 0) {
6723			wl1271_error("request_firmware_nowait failed for %s: %d",
6724				     nvs_name, ret);
6725			complete_all(&wl->nvs_loading_complete);
6726		}
6727	} else {
6728		wlcore_nvs_cb(NULL, wl);
6729	}
6730
6731	wl->dev->driver->pm = &wlcore_pm_ops;
6732	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6733	pm_runtime_use_autosuspend(wl->dev);
6734	pm_runtime_enable(wl->dev);
6735
6736	return ret;
6737}
6738EXPORT_SYMBOL_GPL(wlcore_probe);
6739
6740int wlcore_remove(struct platform_device *pdev)
6741{
6742	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6743	struct wl1271 *wl = platform_get_drvdata(pdev);
6744	int error;
6745
6746	error = pm_runtime_get_sync(wl->dev);
6747	if (error < 0)
6748		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6749
6750	wl->dev->driver->pm = NULL;
6751
6752	if (pdev_data->family && pdev_data->family->nvs_name)
6753		wait_for_completion(&wl->nvs_loading_complete);
6754	if (!wl->initialized)
6755		return 0;
6756
6757	if (wl->wakeirq >= 0) {
6758		dev_pm_clear_wake_irq(wl->dev);
6759		wl->wakeirq = -ENODEV;
6760	}
6761
6762	device_init_wakeup(wl->dev, false);
6763
6764	if (wl->irq_wake_enabled)
6765		disable_irq_wake(wl->irq);
6766
6767	wl1271_unregister_hw(wl);
6768
6769	pm_runtime_put_sync(wl->dev);
6770	pm_runtime_dont_use_autosuspend(wl->dev);
6771	pm_runtime_disable(wl->dev);
6772
6773	free_irq(wl->irq, wl);
6774	wlcore_free_hw(wl);
6775
6776	return 0;
6777}
6778EXPORT_SYMBOL_GPL(wlcore_remove);
6779
6780u32 wl12xx_debug_level = DEBUG_NONE;
6781EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6782module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6783MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6784
6785module_param_named(fwlog, fwlog_param, charp, 0);
6786MODULE_PARM_DESC(fwlog,
6787		 "FW logger options: continuous, dbgpins or disable");
6788
6789module_param(fwlog_mem_blocks, int, 0600);
6790MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6791
6792module_param(bug_on_recovery, int, 0600);
6793MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6794
6795module_param(no_recovery, int, 0600);
6796MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6797
6798MODULE_LICENSE("GPL");
6799MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6800MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * This file is part of wlcore
   4 *
   5 * Copyright (C) 2008-2010 Nokia Corporation
   6 * Copyright (C) 2011-2013 Texas Instruments Inc.
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/firmware.h>
  11#include <linux/etherdevice.h>
  12#include <linux/vmalloc.h>
  13#include <linux/interrupt.h>
  14#include <linux/irq.h>
  15#include <linux/pm_runtime.h>
  16#include <linux/pm_wakeirq.h>
  17
  18#include "wlcore.h"
  19#include "debug.h"
  20#include "wl12xx_80211.h"
  21#include "io.h"
  22#include "tx.h"
  23#include "ps.h"
  24#include "init.h"
  25#include "debugfs.h"
  26#include "testmode.h"
  27#include "vendor_cmd.h"
  28#include "scan.h"
  29#include "hw_ops.h"
  30#include "sysfs.h"
  31
  32#define WL1271_BOOT_RETRIES 3
  33#define WL1271_SUSPEND_SLEEP 100
  34#define WL1271_WAKEUP_TIMEOUT 500
  35
  36static char *fwlog_param;
  37static int fwlog_mem_blocks = -1;
  38static int bug_on_recovery = -1;
  39static int no_recovery     = -1;
  40
  41static void __wl1271_op_remove_interface(struct wl1271 *wl,
  42					 struct ieee80211_vif *vif,
  43					 bool reset_tx_queues);
  44static void wlcore_op_stop_locked(struct wl1271 *wl);
  45static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
  46
  47static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
  48{
  49	int ret;
  50
  51	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
  52		return -EINVAL;
  53
  54	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
  55		return 0;
  56
  57	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
  58		return 0;
  59
  60	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
  61	if (ret < 0)
  62		return ret;
  63
  64	wl1271_info("Association completed.");
  65	return 0;
  66}
  67
  68static void wl1271_reg_notify(struct wiphy *wiphy,
  69			      struct regulatory_request *request)
  70{
  71	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
  72	struct wl1271 *wl = hw->priv;
  73
  74	/* copy the current dfs region */
  75	if (request)
  76		wl->dfs_region = request->dfs_region;
  77
  78	wlcore_regdomain_config(wl);
  79}
  80
  81static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
  82				   bool enable)
  83{
  84	int ret = 0;
  85
  86	/* we should hold wl->mutex */
  87	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
  88	if (ret < 0)
  89		goto out;
  90
  91	if (enable)
  92		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
  93	else
  94		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
  95out:
  96	return ret;
  97}
  98
  99/*
 100 * this function is called when the rx_streaming interval
 101 * has been changed or rx_streaming should be disabled
 102 */
 103int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
 104{
 105	int ret = 0;
 106	int period = wl->conf.rx_streaming.interval;
 107
 108	/* don't reconfigure if rx_streaming is disabled */
 109	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
 110		goto out;
 111
 112	/* reconfigure/disable according to new streaming_period */
 113	if (period &&
 114	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
 115	    (wl->conf.rx_streaming.always ||
 116	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
 117		ret = wl1271_set_rx_streaming(wl, wlvif, true);
 118	else {
 119		ret = wl1271_set_rx_streaming(wl, wlvif, false);
 120		/* don't cancel_work_sync since we might deadlock */
 121		del_timer_sync(&wlvif->rx_streaming_timer);
 122	}
 123out:
 124	return ret;
 125}
 126
 127static void wl1271_rx_streaming_enable_work(struct work_struct *work)
 128{
 129	int ret;
 130	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
 131						rx_streaming_enable_work);
 132	struct wl1271 *wl = wlvif->wl;
 133
 134	mutex_lock(&wl->mutex);
 135
 136	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
 137	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
 138	    (!wl->conf.rx_streaming.always &&
 139	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
 140		goto out;
 141
 142	if (!wl->conf.rx_streaming.interval)
 143		goto out;
 144
 145	ret = pm_runtime_get_sync(wl->dev);
 146	if (ret < 0) {
 147		pm_runtime_put_noidle(wl->dev);
 148		goto out;
 149	}
 150
 151	ret = wl1271_set_rx_streaming(wl, wlvif, true);
 152	if (ret < 0)
 153		goto out_sleep;
 154
 155	/* stop it after some time of inactivity */
 156	mod_timer(&wlvif->rx_streaming_timer,
 157		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
 158
 159out_sleep:
 160	pm_runtime_mark_last_busy(wl->dev);
 161	pm_runtime_put_autosuspend(wl->dev);
 162out:
 163	mutex_unlock(&wl->mutex);
 164}
 165
 166static void wl1271_rx_streaming_disable_work(struct work_struct *work)
 167{
 168	int ret;
 169	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
 170						rx_streaming_disable_work);
 171	struct wl1271 *wl = wlvif->wl;
 172
 173	mutex_lock(&wl->mutex);
 174
 175	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
 176		goto out;
 177
 178	ret = pm_runtime_get_sync(wl->dev);
 179	if (ret < 0) {
 180		pm_runtime_put_noidle(wl->dev);
 181		goto out;
 182	}
 183
 184	ret = wl1271_set_rx_streaming(wl, wlvif, false);
 185	if (ret)
 186		goto out_sleep;
 187
 188out_sleep:
 189	pm_runtime_mark_last_busy(wl->dev);
 190	pm_runtime_put_autosuspend(wl->dev);
 191out:
 192	mutex_unlock(&wl->mutex);
 193}
 194
 195static void wl1271_rx_streaming_timer(struct timer_list *t)
 196{
 197	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
 198	struct wl1271 *wl = wlvif->wl;
 199	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
 200}
 201
 202/* wl->mutex must be taken */
 203void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
 204{
 205	/* if the watchdog is not armed, don't do anything */
 206	if (wl->tx_allocated_blocks == 0)
 207		return;
 208
 209	cancel_delayed_work(&wl->tx_watchdog_work);
 210	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
 211		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
 212}
 213
 214static void wlcore_rc_update_work(struct work_struct *work)
 215{
 216	int ret;
 217	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
 218						rc_update_work);
 219	struct wl1271 *wl = wlvif->wl;
 220	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 221
 222	mutex_lock(&wl->mutex);
 223
 224	if (unlikely(wl->state != WLCORE_STATE_ON))
 225		goto out;
 226
 227	ret = pm_runtime_get_sync(wl->dev);
 228	if (ret < 0) {
 229		pm_runtime_put_noidle(wl->dev);
 230		goto out;
 231	}
 232
 233	if (ieee80211_vif_is_mesh(vif)) {
 234		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
 235						     true, wlvif->sta.hlid);
 236		if (ret < 0)
 237			goto out_sleep;
 238	} else {
 239		wlcore_hw_sta_rc_update(wl, wlvif);
 240	}
 241
 242out_sleep:
 243	pm_runtime_mark_last_busy(wl->dev);
 244	pm_runtime_put_autosuspend(wl->dev);
 245out:
 246	mutex_unlock(&wl->mutex);
 247}
 248
 249static void wl12xx_tx_watchdog_work(struct work_struct *work)
 250{
 251	struct delayed_work *dwork;
 252	struct wl1271 *wl;
 253
 254	dwork = to_delayed_work(work);
 255	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
 256
 257	mutex_lock(&wl->mutex);
 258
 259	if (unlikely(wl->state != WLCORE_STATE_ON))
 260		goto out;
 261
 262	/* Tx went out in the meantime - everything is ok */
 263	if (unlikely(wl->tx_allocated_blocks == 0))
 264		goto out;
 265
 266	/*
 267	 * if a ROC is in progress, we might not have any Tx for a long
 268	 * time (e.g. pending Tx on the non-ROC channels)
 269	 */
 270	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
 271		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
 272			     wl->conf.tx.tx_watchdog_timeout);
 273		wl12xx_rearm_tx_watchdog_locked(wl);
 274		goto out;
 275	}
 276
 277	/*
 278	 * if a scan is in progress, we might not have any Tx for a long
 279	 * time
 280	 */
 281	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
 282		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
 283			     wl->conf.tx.tx_watchdog_timeout);
 284		wl12xx_rearm_tx_watchdog_locked(wl);
 285		goto out;
 286	}
 287
 288	/*
 289	 * The AP might cache a frame for a long time for a sleeping station,
 290	 * so rearm the timer if there's an AP interface with stations. If
 291	 * Tx is genuinely stuck, we will hopefully discover it when all
 292	 * stations are removed due to inactivity.
 293	 */
 294	if (wl->active_sta_count) {
 295		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
 296			     "%d stations",
 297			      wl->conf.tx.tx_watchdog_timeout,
 298			      wl->active_sta_count);
 299		wl12xx_rearm_tx_watchdog_locked(wl);
 300		goto out;
 301	}
 302
 303	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
 304		     wl->conf.tx.tx_watchdog_timeout);
 305	wl12xx_queue_recovery_work(wl);
 306
 307out:
 308	mutex_unlock(&wl->mutex);
 309}
 310
 311static void wlcore_adjust_conf(struct wl1271 *wl)
 312{
 313
 314	if (fwlog_param) {
 315		if (!strcmp(fwlog_param, "continuous")) {
 316			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
 317			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
 318		} else if (!strcmp(fwlog_param, "dbgpins")) {
 319			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
 320			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
 321		} else if (!strcmp(fwlog_param, "disable")) {
 322			wl->conf.fwlog.mem_blocks = 0;
 323			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
 324		} else {
 325			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
 326		}
 327	}
 328
 329	if (bug_on_recovery != -1)
 330		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
 331
 332	if (no_recovery != -1)
 333		wl->conf.recovery.no_recovery = (u8) no_recovery;
 334}
 335
 336static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
 337					struct wl12xx_vif *wlvif,
 338					u8 hlid, u8 tx_pkts)
 339{
 340	bool fw_ps;
 341
 342	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
 343
 344	/*
 345	 * Wake up from high-level PS if the STA is asleep with too few
 346	 * packets in FW or if the STA is awake.
 347	 */
 348	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
 349		wl12xx_ps_link_end(wl, wlvif, hlid);
 350
 351	/*
 352	 * Start high-level PS if the STA is asleep with enough blocks in FW.
 353	 * Make an exception if this is the only connected link. In this
 354	 * case FW-memory congestion is less of a problem.
 355	 * Note that a single connected STA means 2*ap_count + 1 active links,
 356	 * since we must account for the global and broadcast AP links
 357	 * for each AP. The "fw_ps" check assures us the other link is a STA
 358	 * connected to the AP. Otherwise the FW would not set the PSM bit.
 359	 */
 360	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
 361		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
 362		wl12xx_ps_link_start(wl, wlvif, hlid, true);
 363}
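
/*
 * Worked example of the threshold above: with a single AP interface
 * (ap_count = 1), the global and broadcast AP links plus one connected
 * STA give 2 * 1 + 1 = 3 active links, so high-level PS is only
 * started once a second remote link pushes active_link_count above
 * that value.
 */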
 364
 365static void wl12xx_irq_update_links_status(struct wl1271 *wl,
 366					   struct wl12xx_vif *wlvif,
 367					   struct wl_fw_status *status)
 368{
 369	unsigned long cur_fw_ps_map;
 370	u8 hlid;
 371
 372	cur_fw_ps_map = status->link_ps_bitmap;
 373	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
 374		wl1271_debug(DEBUG_PSM,
 375			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
 376			     wl->ap_fw_ps_map, cur_fw_ps_map,
 377			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
 378
 379		wl->ap_fw_ps_map = cur_fw_ps_map;
 380	}
 381
 382	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
 383		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
 384					    wl->links[hlid].allocated_pkts);
 385}
 386
 387static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
 388{
 389	struct wl12xx_vif *wlvif;
 390	u32 old_tx_blk_count = wl->tx_blocks_available;
 391	int avail, freed_blocks;
 392	int i;
 393	int ret;
 394	struct wl1271_link *lnk;
 395
 396	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
 397				   wl->raw_fw_status,
 398				   wl->fw_status_len, false);
 399	if (ret < 0)
 400		return ret;
 401
 402	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
 403
 404	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
 405		     "drv_rx_counter = %d, tx_results_counter = %d)",
 406		     status->intr,
 407		     status->fw_rx_counter,
 408		     status->drv_rx_counter,
 409		     status->tx_results_counter);
 410
 411	for (i = 0; i < NUM_TX_QUEUES; i++) {
 412		/* prevent wrap-around in freed-packets counter */
 413		wl->tx_allocated_pkts[i] -=
 414				(status->counters.tx_released_pkts[i] -
 415				wl->tx_pkts_freed[i]) & 0xff;
 416
 417		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
 418	}
 419
 420
 421	for_each_set_bit(i, wl->links_map, wl->num_links) {
 422		u8 diff;
 423		lnk = &wl->links[i];
 424
 425		/* prevent wrap-around in freed-packets counter */
 426		diff = (status->counters.tx_lnk_free_pkts[i] -
 427		       lnk->prev_freed_pkts) & 0xff;
 428
 429		if (diff == 0)
 430			continue;
 431
 432		lnk->allocated_pkts -= diff;
 433		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
 434
 435		/* accumulate the prev_freed_pkts counter */
 436		lnk->total_freed_pkts += diff;
 437	}
 438
 439	/* prevent wrap-around in total blocks counter */
 440	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
 441		freed_blocks = status->total_released_blks -
 442			       wl->tx_blocks_freed;
 443	else
 444		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
 445			       status->total_released_blks;
 446
 447	wl->tx_blocks_freed = status->total_released_blks;
 448
 449	wl->tx_allocated_blocks -= freed_blocks;
 450
 451	/*
 452	 * If the FW freed some blocks:
 453	 * If we still have allocated blocks - re-arm the timer, Tx is
 454	 * not stuck. Otherwise, cancel the timer (no Tx currently).
 455	 */
 456	if (freed_blocks) {
 457		if (wl->tx_allocated_blocks)
 458			wl12xx_rearm_tx_watchdog_locked(wl);
 459		else
 460			cancel_delayed_work(&wl->tx_watchdog_work);
 461	}
 462
 463	avail = status->tx_total - wl->tx_allocated_blocks;
 464
 465	/*
 466	 * The FW might change the total number of TX memblocks before
 467	 * we get a notification about blocks being released. Thus, the
 468	 * available blocks calculation might yield a temporary result
 469	 * which is lower than the actual available blocks. Keeping in
 470	 * mind that only blocks that were allocated can be moved from
 471	 * TX to RX, tx_blocks_available should never decrease here.
 472	 */
 473	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
 474				      avail);
 475
 476	/* if more blocks are available now, tx work can be scheduled */
 477	if (wl->tx_blocks_available > old_tx_blk_count)
 478		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
 479
 480	/* for AP update num of allocated TX blocks per link and ps status */
 481	wl12xx_for_each_wlvif_ap(wl, wlvif) {
 482		wl12xx_irq_update_links_status(wl, wlvif, status);
 483	}
 484
 485	/* update the host-chipset time offset */
 486	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
 487		(s64)(status->fw_localtime);
 488
 489	wl->fw_fast_lnk_map = status->link_fast_bitmap;
 490
 491	return 0;
 492}
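
/*
 * Worked example of the wrap-safe deltas above: if a u8 freed-packets
 * counter previously read 0xfe and now reads 0x03, then
 * (0x03 - 0xfe) & 0xff = 0x05 packets were freed. The total-blocks
 * counter is handled the same way over a 32-bit range (0x100000000).
 */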
 493
 494static void wl1271_flush_deferred_work(struct wl1271 *wl)
 495{
 496	struct sk_buff *skb;
 497
 498	/* Pass all received frames to the network stack */
 499	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
 500		ieee80211_rx_ni(wl->hw, skb);
 501
 502	/* Return sent skbs to the network stack */
 503	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
 504		ieee80211_tx_status_ni(wl->hw, skb);
 505}
 506
 507static void wl1271_netstack_work(struct work_struct *work)
 508{
 509	struct wl1271 *wl =
 510		container_of(work, struct wl1271, netstack_work);
 511
 512	do {
 513		wl1271_flush_deferred_work(wl);
 514	} while (skb_queue_len(&wl->deferred_rx_queue));
 515}
 516
 517#define WL1271_IRQ_MAX_LOOPS 256
 518
 519static int wlcore_irq_locked(struct wl1271 *wl)
 520{
 521	int ret = 0;
 522	u32 intr;
 523	int loopcount = WL1271_IRQ_MAX_LOOPS;
 524	bool run_tx_queue = true;
 525	bool done = false;
 526	unsigned int defer_count;
 527	unsigned long flags;
 528
 529	/*
 530	 * In case an edge-triggered interrupt must be used, we cannot iterate
 531	 * more than once without introducing race conditions with the hardirq.
 532	 */
 533	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
 534		loopcount = 1;
 535
 536	wl1271_debug(DEBUG_IRQ, "IRQ work");
 537
 538	if (unlikely(wl->state != WLCORE_STATE_ON))
 539		goto out;
 540
 541	ret = pm_runtime_get_sync(wl->dev);
 542	if (ret < 0) {
 543		pm_runtime_put_noidle(wl->dev);
 544		goto out;
 545	}
 546
 547	while (!done && loopcount--) {
 548		smp_mb__after_atomic();
 549
 550		ret = wlcore_fw_status(wl, wl->fw_status);
 551		if (ret < 0)
 552			goto err_ret;
 553
 554		wlcore_hw_tx_immediate_compl(wl);
 555
 556		intr = wl->fw_status->intr;
 557		intr &= WLCORE_ALL_INTR_MASK;
 558		if (!intr) {
 559			done = true;
 560			continue;
 561		}
 562
 563		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
 564			wl1271_error("HW watchdog interrupt received! starting recovery.");
 565			wl->watchdog_recovery = true;
 566			ret = -EIO;
 567
 568			/* restarting the chip. ignore any other interrupt. */
 569			goto err_ret;
 570		}
 571
 572		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
 573			wl1271_error("SW watchdog interrupt received! "
 574				     "starting recovery.");
 575			wl->watchdog_recovery = true;
 576			ret = -EIO;
 577
 578			/* restarting the chip. ignore any other interrupt. */
 579			goto err_ret;
 580		}
 581
 582		if (likely(intr & WL1271_ACX_INTR_DATA)) {
 583			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
 584
 585			ret = wlcore_rx(wl, wl->fw_status);
 586			if (ret < 0)
 587				goto err_ret;
 588
 589			/* Check if any tx blocks were freed */
 590			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
 591				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
 592					if (!wl1271_tx_total_queue_count(wl))
 593						run_tx_queue = false;
 594					spin_unlock_irqrestore(&wl->wl_lock, flags);
 595				}
 596
 597				/*
 598				 * In order to avoid starvation of the TX path,
 599				 * call the work function directly.
 600				 */
 601				if (run_tx_queue) {
 602					ret = wlcore_tx_work_locked(wl);
 603					if (ret < 0)
 604						goto err_ret;
 605				}
 606			}
 607
 608			/* check for tx results */
 609			ret = wlcore_hw_tx_delayed_compl(wl);
 610			if (ret < 0)
 611				goto err_ret;
 612
 613			/* Make sure the deferred queues don't get too long */
 614			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
 615				      skb_queue_len(&wl->deferred_rx_queue);
 616			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
 617				wl1271_flush_deferred_work(wl);
 618		}
 619
 620		if (intr & WL1271_ACX_INTR_EVENT_A) {
 621			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
 622			ret = wl1271_event_handle(wl, 0);
 623			if (ret < 0)
 624				goto err_ret;
 625		}
 626
 627		if (intr & WL1271_ACX_INTR_EVENT_B) {
 628			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
 629			ret = wl1271_event_handle(wl, 1);
 630			if (ret < 0)
 631				goto err_ret;
 632		}
 633
 634		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
 635			wl1271_debug(DEBUG_IRQ,
 636				     "WL1271_ACX_INTR_INIT_COMPLETE");
 637
 638		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
 639			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
 640	}
 641
 642err_ret:
 643	pm_runtime_mark_last_busy(wl->dev);
 644	pm_runtime_put_autosuspend(wl->dev);
 645
 646out:
 647	return ret;
 648}
 649
 650static irqreturn_t wlcore_irq(int irq, void *cookie)
 651{
 652	int ret;
 653	unsigned long flags;
 654	struct wl1271 *wl = cookie;
 655	bool queue_tx_work = true;
 656
 657	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
 658
 659	/* signal the ELP wakeup completion */
 660	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
 661		spin_lock_irqsave(&wl->wl_lock, flags);
 662		if (wl->elp_compl)
 663			complete(wl->elp_compl);
 664		spin_unlock_irqrestore(&wl->wl_lock, flags);
 665	}
 666
 667	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
 668		/* don't enqueue work right now; mark it as pending */
 669		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
 670		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
 671		spin_lock_irqsave(&wl->wl_lock, flags);
 672		disable_irq_nosync(wl->irq);
 673		pm_wakeup_event(wl->dev, 0);
 674		spin_unlock_irqrestore(&wl->wl_lock, flags);
 675		goto out_handled;
 676	}
 677
 678	/* TX might be handled here, avoid redundant work */
 679	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
 680	cancel_work_sync(&wl->tx_work);
 681
 682	mutex_lock(&wl->mutex);
 683
 684	ret = wlcore_irq_locked(wl);
 685	if (ret)
 686		wl12xx_queue_recovery_work(wl);
 687
 688	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
 689	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
 690	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
 691		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
 692			if (!wl1271_tx_total_queue_count(wl))
 693				queue_tx_work = false;
 694			spin_unlock_irqrestore(&wl->wl_lock, flags);
 695		}
 696		if (queue_tx_work)
 697			ieee80211_queue_work(wl->hw, &wl->tx_work);
 698	}
 699
 700	mutex_unlock(&wl->mutex);
 701
 702out_handled:
 703	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
 704
 705	return IRQ_HANDLED;
 706}
 707
 708struct vif_counter_data {
 709	u8 counter;
 710
 711	struct ieee80211_vif *cur_vif;
 712	bool cur_vif_running;
 713};
 714
 715static void wl12xx_vif_count_iter(void *data, u8 *mac,
 716				  struct ieee80211_vif *vif)
 717{
 718	struct vif_counter_data *counter = data;
 719
 720	counter->counter++;
 721	if (counter->cur_vif == vif)
 722		counter->cur_vif_running = true;
 723}
 724
 725/* caller must not hold wl->mutex, as it might deadlock */
 726static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
 727			       struct ieee80211_vif *cur_vif,
 728			       struct vif_counter_data *data)
 729{
 730	memset(data, 0, sizeof(*data));
 731	data->cur_vif = cur_vif;
 732
 733	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
 734					    wl12xx_vif_count_iter, data);
 735}
 736
 737static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
 738{
 739	const struct firmware *fw;
 740	const char *fw_name;
 741	enum wl12xx_fw_type fw_type;
 742	int ret;
 743
 744	if (plt) {
 745		fw_type = WL12XX_FW_TYPE_PLT;
 746		fw_name = wl->plt_fw_name;
 747	} else {
 748		/*
 749		 * we can't call wl12xx_get_vif_count() here because
 750		 * wl->mutex is taken, so use the cached last_vif_count value
 751		 */
 752		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
 753			fw_type = WL12XX_FW_TYPE_MULTI;
 754			fw_name = wl->mr_fw_name;
 755		} else {
 756			fw_type = WL12XX_FW_TYPE_NORMAL;
 757			fw_name = wl->sr_fw_name;
 758		}
 759	}
 760
 761	if (wl->fw_type == fw_type)
 762		return 0;
 763
 764	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
 765
 766	ret = request_firmware(&fw, fw_name, wl->dev);
 767
 768	if (ret < 0) {
 769		wl1271_error("could not get firmware %s: %d", fw_name, ret);
 770		return ret;
 771	}
 772
 773	if (fw->size % 4) {
 774		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
 775			     fw->size);
 776		ret = -EILSEQ;
 777		goto out;
 778	}
 779
 780	vfree(wl->fw);
 781	wl->fw_type = WL12XX_FW_TYPE_NONE;
 782	wl->fw_len = fw->size;
 783	wl->fw = vmalloc(wl->fw_len);
 784
 785	if (!wl->fw) {
 786		wl1271_error("could not allocate memory for the firmware");
 787		ret = -ENOMEM;
 788		goto out;
 789	}
 790
 791	memcpy(wl->fw, fw->data, wl->fw_len);
 792	ret = 0;
 793	wl->fw_type = fw_type;
 794out:
 795	release_firmware(fw);
 796
 797	return ret;
 798}
 799
 800void wl12xx_queue_recovery_work(struct wl1271 *wl)
 801{
 802	/* Avoid a recursive recovery */
 803	if (wl->state == WLCORE_STATE_ON) {
 804		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
 805				  &wl->flags));
 806
 807		wl->state = WLCORE_STATE_RESTARTING;
 808		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
 809		ieee80211_queue_work(wl->hw, &wl->recovery_work);
 810	}
 811}
 812
 813size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
 814{
 815	size_t len;
 816
 817	/* Make sure we have enough room */
 818	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
 819
 820	/* Fill the FW log file, consumed by the sysfs fwlog entry */
 821	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
 822	wl->fwlog_size += len;
 823
 824	return len;
 825}
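
/*
 * Worked example (assuming the common 4096-byte PAGE_SIZE): if
 * wl->fwlog_size is already 4000, a 300-byte memblock is clamped to
 * min(300, 4096 - 4000) = 96 bytes, so the one-page FW log buffer
 * never overflows.
 */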
 826
 827static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
 828{
 829	u32 end_of_log = 0;
 830	int error;
 831
 832	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
 833		return;
 834
 835	wl1271_info("Reading FW panic log");
 836
 837	/*
 838	 * Make sure the chip is awake and the logger isn't active.
 839	 * Do not send a stop fwlog command if the fw is hung or if
 840	 * dbgpins are used (due to some fw bug).
 841	 */
 842	error = pm_runtime_get_sync(wl->dev);
 843	if (error < 0) {
 844		pm_runtime_put_noidle(wl->dev);
 845		return;
 846	}
 847	if (!wl->watchdog_recovery &&
 848	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
 849		wl12xx_cmd_stop_fwlog(wl);
 850
 851	/* Traverse the memory blocks linked list */
 852	do {
 853		end_of_log = wlcore_event_fw_logger(wl);
 854		if (end_of_log == 0) {
 855			msleep(100);
 856			end_of_log = wlcore_event_fw_logger(wl);
 857		}
 858	} while (end_of_log != 0);
 859}
 860
 861static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
 862				   u8 hlid, struct ieee80211_sta *sta)
 863{
 864	struct wl1271_station *wl_sta;
 865	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
 866
 867	wl_sta = (void *)sta->drv_priv;
 868	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
 869
 870	/*
 871	 * increment the initial seq number on recovery to account for
 872	 * transmitted packets that we haven't yet got in the FW status
 873	 */
 874	if (wlvif->encryption_type == KEY_GEM)
 875		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
 876
 877	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
 878		wl_sta->total_freed_pkts += sqn_recovery_padding;
 879}
 880
 881static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
 882					struct wl12xx_vif *wlvif,
 883					u8 hlid, const u8 *addr)
 884{
 885	struct ieee80211_sta *sta;
 886	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
 887
 888	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
 889		    is_zero_ether_addr(addr)))
 890		return;
 891
 892	rcu_read_lock();
 893	sta = ieee80211_find_sta(vif, addr);
 894	if (sta)
 895		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
 896	rcu_read_unlock();
 897}
 898
 899static void wlcore_print_recovery(struct wl1271 *wl)
 900{
 901	u32 pc = 0;
 902	u32 hint_sts = 0;
 903	int ret;
 904
 905	wl1271_info("Hardware recovery in progress. FW ver: %s",
 906		    wl->chip.fw_ver_str);
 907
 908	/* change partitions momentarily so we can read the FW pc */
 909	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
 910	if (ret < 0)
 911		return;
 912
 913	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
 914	if (ret < 0)
 915		return;
 916
 917	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
 918	if (ret < 0)
 919		return;
 920
 921	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
 922				pc, hint_sts, ++wl->recovery_count);
 923
 924	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
 925}
 926
 927
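/*
 * Recovery worker overview (as implemented below): wake the device via
 * runtime PM, disable interrupts, dump the FW panic log and program counter
 * unless the recovery was intentionally requested, stop the TX queues, save
 * the per-station freed-packet counters, tear down every virtual interface,
 * power the chip off, and finally ask mac80211 to restart the hardware,
 * which re-adds the interfaces and re-triggers the FW boot.
 */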
 928static void wl1271_recovery_work(struct work_struct *work)
 929{
 930	struct wl1271 *wl =
 931		container_of(work, struct wl1271, recovery_work);
 932	struct wl12xx_vif *wlvif;
 933	struct ieee80211_vif *vif;
 934	int error;
 935
 936	mutex_lock(&wl->mutex);
 937
 938	if (wl->state == WLCORE_STATE_OFF || wl->plt)
 939		goto out_unlock;
 940
 941	error = pm_runtime_get_sync(wl->dev);
 942	if (error < 0) {
 943		wl1271_warning("Enable for recovery failed");
 944		pm_runtime_put_noidle(wl->dev);
 945	}
 946	wlcore_disable_interrupts_nosync(wl);
 947
 948	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
 949		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
 950			wl12xx_read_fwlog_panic(wl);
 951		wlcore_print_recovery(wl);
 952	}
 953
 954	BUG_ON(wl->conf.recovery.bug_on_recovery &&
 955	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
 956
 957	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
 958
 959	if (wl->conf.recovery.no_recovery) {
 960		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
 961		goto out_unlock;
 962	}
 963
 964	/* Prevent spurious TX during FW restart */
 965	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
 966
 967	/* reboot the chipset */
 968	while (!list_empty(&wl->wlvif_list)) {
 969		wlvif = list_first_entry(&wl->wlvif_list,
 970				       struct wl12xx_vif, list);
 971		vif = wl12xx_wlvif_to_vif(wlvif);
 972
 973		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
 974		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
 975			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
 976						    vif->bss_conf.bssid);
 977		}
 978
 979		__wl1271_op_remove_interface(wl, vif, false);
 980	}
 981
 982	wlcore_op_stop_locked(wl);
 983	pm_runtime_mark_last_busy(wl->dev);
 984	pm_runtime_put_autosuspend(wl->dev);
 985
 986	ieee80211_restart_hw(wl->hw);
 987
 988	/*
  989	 * It's safe to enable TX now - the queues are stopped after a request
 990	 * to restart the HW.
 991	 */
 992	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
 993
 994out_unlock:
 995	wl->watchdog_recovery = false;
 996	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
 997	mutex_unlock(&wl->mutex);
 998}
 999
1000static int wlcore_fw_wakeup(struct wl1271 *wl)
1001{
1002	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1003}
1004
1005static int wl1271_setup(struct wl1271 *wl)
1006{
1007	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1008	if (!wl->raw_fw_status)
1009		goto err;
1010
1011	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1012	if (!wl->fw_status)
1013		goto err;
1014
1015	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1016	if (!wl->tx_res_if)
1017		goto err;
1018
1019	return 0;
1020err:
1021	kfree(wl->fw_status);
1022	kfree(wl->raw_fw_status);
1023	return -ENOMEM;
1024}
1025
1026static int wl12xx_set_power_on(struct wl1271 *wl)
1027{
1028	int ret;
1029
1030	msleep(WL1271_PRE_POWER_ON_SLEEP);
1031	ret = wl1271_power_on(wl);
1032	if (ret < 0)
1033		goto out;
1034	msleep(WL1271_POWER_ON_SLEEP);
1035	wl1271_io_reset(wl);
1036	wl1271_io_init(wl);
1037
1038	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1039	if (ret < 0)
1040		goto fail;
1041
1042	/* ELP module wake up */
1043	ret = wlcore_fw_wakeup(wl);
1044	if (ret < 0)
1045		goto fail;
1046
1047out:
1048	return ret;
1049
1050fail:
1051	wl1271_power_off(wl);
1052	return ret;
1053}
1054
1055static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1056{
1057	int ret = 0;
1058
1059	ret = wl12xx_set_power_on(wl);
1060	if (ret < 0)
1061		goto out;
1062
1063	/*
1064	 * For wl127x based devices we could use the default block
1065	 * size (512 bytes), but due to a bug in the sdio driver, we
1066	 * need to set it explicitly after the chip is powered on.  To
1067	 * simplify the code and since the performance impact is
1068	 * negligible, we use the same block size for all different
1069	 * chip types.
1070	 *
1071	 * Check if the bus supports blocksize alignment and, if it
1072	 * doesn't, make sure we don't have the quirk.
1073	 */
1074	if (!wl1271_set_block_size(wl))
1075		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1076
1077	/* TODO: make sure the lower driver has set things up correctly */
1078
1079	ret = wl1271_setup(wl);
1080	if (ret < 0)
1081		goto out;
1082
1083	ret = wl12xx_fetch_firmware(wl, plt);
1084	if (ret < 0) {
1085		kfree(wl->fw_status);
1086		kfree(wl->raw_fw_status);
1087		kfree(wl->tx_res_if);
1088	}
1089
1090out:
1091	return ret;
1092}
1093
1094int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1095{
1096	int retries = WL1271_BOOT_RETRIES;
1097	struct wiphy *wiphy = wl->hw->wiphy;
1098
1099	static const char* const PLT_MODE[] = {
1100		"PLT_OFF",
1101		"PLT_ON",
1102		"PLT_FEM_DETECT",
1103		"PLT_CHIP_AWAKE"
1104	};
1105
1106	int ret;
1107
1108	mutex_lock(&wl->mutex);
1109
1110	wl1271_notice("power up");
1111
1112	if (wl->state != WLCORE_STATE_OFF) {
1113		wl1271_error("cannot go into PLT state because not "
1114			     "in off state: %d", wl->state);
1115		ret = -EBUSY;
1116		goto out;
1117	}
1118
1119	/* Indicate to lower levels that we are now in PLT mode */
1120	wl->plt = true;
1121	wl->plt_mode = plt_mode;
1122
1123	while (retries) {
1124		retries--;
1125		ret = wl12xx_chip_wakeup(wl, true);
1126		if (ret < 0)
1127			goto power_off;
1128
1129		if (plt_mode != PLT_CHIP_AWAKE) {
1130			ret = wl->ops->plt_init(wl);
1131			if (ret < 0)
1132				goto power_off;
1133		}
1134
1135		wl->state = WLCORE_STATE_ON;
1136		wl1271_notice("firmware booted in PLT mode %s (%s)",
1137			      PLT_MODE[plt_mode],
1138			      wl->chip.fw_ver_str);
1139
1140		/* update hw/fw version info in wiphy struct */
1141		wiphy->hw_version = wl->chip.id;
1142		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1143			sizeof(wiphy->fw_version));
1144
1145		goto out;
1146
1147power_off:
1148		wl1271_power_off(wl);
1149	}
1150
1151	wl->plt = false;
1152	wl->plt_mode = PLT_OFF;
1153
1154	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1155		     WL1271_BOOT_RETRIES);
1156out:
1157	mutex_unlock(&wl->mutex);
1158
1159	return ret;
1160}
1161
1162int wl1271_plt_stop(struct wl1271 *wl)
1163{
1164	int ret = 0;
1165
1166	wl1271_notice("power down");
1167
1168	/*
1169	 * Interrupts must be disabled before setting the state to OFF.
1170	 * Otherwise, the interrupt handler might be called and exit without
1171	 * reading the interrupt status.
1172	 */
1173	wlcore_disable_interrupts(wl);
1174	mutex_lock(&wl->mutex);
1175	if (!wl->plt) {
1176		mutex_unlock(&wl->mutex);
1177
1178		/*
1179		 * This will not necessarily enable interrupts as interrupts
1180		 * may have been disabled when op_stop was called. It will,
1181		 * however, balance the above call to disable_interrupts().
1182		 */
1183		wlcore_enable_interrupts(wl);
1184
1185		wl1271_error("cannot power down because not in PLT "
1186			     "state: %d", wl->state);
1187		ret = -EBUSY;
1188		goto out;
1189	}
1190
1191	mutex_unlock(&wl->mutex);
1192
1193	wl1271_flush_deferred_work(wl);
1194	cancel_work_sync(&wl->netstack_work);
1195	cancel_work_sync(&wl->recovery_work);
1196	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1197
1198	mutex_lock(&wl->mutex);
1199	wl1271_power_off(wl);
1200	wl->flags = 0;
1201	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1202	wl->state = WLCORE_STATE_OFF;
1203	wl->plt = false;
1204	wl->plt_mode = PLT_OFF;
1205	wl->rx_counter = 0;
1206	mutex_unlock(&wl->mutex);
1207
1208out:
1209	return ret;
1210}
1211
1212static void wl1271_op_tx(struct ieee80211_hw *hw,
1213			 struct ieee80211_tx_control *control,
1214			 struct sk_buff *skb)
1215{
1216	struct wl1271 *wl = hw->priv;
1217	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1218	struct ieee80211_vif *vif = info->control.vif;
1219	struct wl12xx_vif *wlvif = NULL;
1220	unsigned long flags;
1221	int q, mapping;
1222	u8 hlid;
1223
1224	if (!vif) {
1225		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1226		ieee80211_free_txskb(hw, skb);
1227		return;
1228	}
1229
1230	wlvif = wl12xx_vif_to_data(vif);
1231	mapping = skb_get_queue_mapping(skb);
1232	q = wl1271_tx_get_queue(mapping);
1233
1234	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1235
1236	spin_lock_irqsave(&wl->wl_lock, flags);
1237
1238	/*
1239	 * drop the packet if the link is invalid or the queue is stopped
1240	 * for any reason but watermark. Watermark is a "soft"-stop so we
1241	 * allow these packets through.
1242	 */
1243	if (hlid == WL12XX_INVALID_LINK_ID ||
1244	    (!test_bit(hlid, wlvif->links_map)) ||
1245	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1246	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1247			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1248		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1249		ieee80211_free_txskb(hw, skb);
1250		goto out;
1251	}
1252
1253	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1254		     hlid, q, skb->len);
1255	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1256
1257	wl->tx_queue_count[q]++;
1258	wlvif->tx_queue_count[q]++;
1259
1260	/*
 1261	 * The workqueue is slow to process the tx_queue, so we need to stop
 1262	 * the queue here; otherwise it will grow too long.
1263	 */
1264	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1265	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1266					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1267		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1268		wlcore_stop_queue_locked(wl, wlvif, q,
1269					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1270	}
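	/*
	 * Only the stop side is handled here; the matching wake is expected
	 * to happen elsewhere in the driver once the per-vif queue count
	 * drains back below a low watermark (not visible in this function).
	 */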
1271
1272	/*
1273	 * The chip specific setup must run before the first TX packet -
1274	 * before that, the tx_work will not be initialized!
1275	 */
1276
1277	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1278	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1279		ieee80211_queue_work(wl->hw, &wl->tx_work);
1280
1281out:
1282	spin_unlock_irqrestore(&wl->wl_lock, flags);
1283}
1284
1285int wl1271_tx_dummy_packet(struct wl1271 *wl)
1286{
1287	unsigned long flags;
1288	int q;
1289
1290	/* no need to queue a new dummy packet if one is already pending */
1291	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1292		return 0;
1293
1294	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1295
1296	spin_lock_irqsave(&wl->wl_lock, flags);
1297	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1298	wl->tx_queue_count[q]++;
1299	spin_unlock_irqrestore(&wl->wl_lock, flags);
1300
1301	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1302	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1303		return wlcore_tx_work_locked(wl);
1304
1305	/*
1306	 * If the FW TX is busy, TX work will be scheduled by the threaded
1307	 * interrupt handler function
1308	 */
1309	return 0;
1310}
1311
1312/*
1313 * The size of the dummy packet should be at least 1400 bytes. However, in
 1314 * order to minimize the number of bus transactions, aligning it to a 512-byte
 1315 * boundary can be beneficial, performance-wise.
1316 */
1317#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
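/*
 * ALIGN() rounds up to the next multiple of its second argument, so the
 * total here works out to ALIGN(1400, 512) == 1536 bytes (three 512-byte
 * transfer units).
 */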
1318
1319static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1320{
1321	struct sk_buff *skb;
1322	struct ieee80211_hdr_3addr *hdr;
1323	unsigned int dummy_packet_size;
1324
1325	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1326			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1327
1328	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1329	if (!skb) {
1330		wl1271_warning("Failed to allocate a dummy packet skb");
1331		return NULL;
1332	}
1333
1334	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1335
1336	hdr = skb_put_zero(skb, sizeof(*hdr));
1337	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1338					 IEEE80211_STYPE_NULLFUNC |
1339					 IEEE80211_FCTL_TODS);
1340
1341	skb_put_zero(skb, dummy_packet_size);
1342
1343	/* Dummy packets require the TID to be management */
1344	skb->priority = WL1271_TID_MGMT;
1345
1346	/* Initialize all fields that might be used */
1347	skb_set_queue_mapping(skb, 0);
1348	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1349
1350	return skb;
1351}
1352
1353
1354static int
1355wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1356{
1357	int num_fields = 0, in_field = 0, fields_size = 0;
1358	int i, pattern_len = 0;
1359
1360	if (!p->mask) {
1361		wl1271_warning("No mask in WoWLAN pattern");
1362		return -EINVAL;
1363	}
1364
1365	/*
1366	 * The pattern is broken up into segments of bytes at different offsets
1367	 * that need to be checked by the FW filter. Each segment is called
1368	 * a field in the FW API. We verify that the total number of fields
1369	 * required for this pattern won't exceed FW limits (8)
1370	 * as well as the total fields buffer won't exceed the FW limit.
1371	 * Note that if there's a pattern which crosses Ethernet/IP header
1372	 * boundary a new field is required.
1373	 */
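	/*
	 * For example (assuming the usual 14-byte Ethernet header boundary):
	 * a mask selecting bytes 0-5 and bytes 12-15 yields three fields,
	 * namely bytes 0-5, bytes 12-13 (Ethernet header) and bytes 14-15
	 * (IP header), because the second run crosses the header boundary.
	 */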
1374	for (i = 0; i < p->pattern_len; i++) {
1375		if (test_bit(i, (unsigned long *)p->mask)) {
1376			if (!in_field) {
1377				in_field = 1;
1378				pattern_len = 1;
1379			} else {
1380				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1381					num_fields++;
1382					fields_size += pattern_len +
1383						RX_FILTER_FIELD_OVERHEAD;
1384					pattern_len = 1;
1385				} else
1386					pattern_len++;
1387			}
1388		} else {
1389			if (in_field) {
1390				in_field = 0;
1391				fields_size += pattern_len +
1392					RX_FILTER_FIELD_OVERHEAD;
1393				num_fields++;
1394			}
1395		}
1396	}
1397
1398	if (in_field) {
1399		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1400		num_fields++;
1401	}
1402
1403	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1404		wl1271_warning("RX Filter too complex. Too many segments");
1405		return -EINVAL;
1406	}
1407
1408	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1409		wl1271_warning("RX filter pattern is too big");
1410		return -E2BIG;
1411	}
1412
1413	return 0;
1414}
1415
1416struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1417{
1418	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1419}
1420
1421void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1422{
1423	int i;
1424
1425	if (filter == NULL)
1426		return;
1427
1428	for (i = 0; i < filter->num_fields; i++)
1429		kfree(filter->fields[i].pattern);
1430
1431	kfree(filter);
1432}
1433
1434int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1435				 u16 offset, u8 flags,
1436				 const u8 *pattern, u8 len)
1437{
1438	struct wl12xx_rx_filter_field *field;
1439
1440	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1441		wl1271_warning("Max fields per RX filter. can't alloc another");
1442		return -EINVAL;
1443	}
1444
1445	field = &filter->fields[filter->num_fields];
1446
1447	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1448	if (!field->pattern) {
1449		wl1271_warning("Failed to allocate RX filter pattern");
1450		return -ENOMEM;
1451	}
1452
1453	filter->num_fields++;
1454
1455	field->offset = cpu_to_le16(offset);
1456	field->flags = flags;
1457	field->len = len;
1458
1459	return 0;
1460}
1461
1462int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1463{
1464	int i, fields_size = 0;
1465
1466	for (i = 0; i < filter->num_fields; i++)
1467		fields_size += filter->fields[i].len +
1468			sizeof(struct wl12xx_rx_filter_field) -
1469			sizeof(u8 *);
1470
1471	return fields_size;
1472}
1473
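/*
 * Flatten the filter fields into a contiguous buffer for the FW command.
 * Judging from the layout used below, each serialized field consists of the
 * fixed part of struct wl12xx_rx_filter_field minus the pattern pointer,
 * followed immediately by the pattern bytes themselves; this matches the
 * sizeof(u8 *) adjustment in wl1271_rx_filter_get_fields_size() above.
 */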
1474void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1475				    u8 *buf)
1476{
1477	int i;
1478	struct wl12xx_rx_filter_field *field;
1479
1480	for (i = 0; i < filter->num_fields; i++) {
1481		field = (struct wl12xx_rx_filter_field *)buf;
1482
1483		field->offset = filter->fields[i].offset;
1484		field->flags = filter->fields[i].flags;
1485		field->len = filter->fields[i].len;
1486
1487		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1488		buf += sizeof(struct wl12xx_rx_filter_field) -
1489			sizeof(u8 *) + field->len;
1490	}
1491}
1492
1493/*
1494 * Allocates an RX filter returned through f
1495 * which needs to be freed using rx_filter_free()
1496 */
1497static int
1498wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1499					   struct wl12xx_rx_filter **f)
1500{
1501	int i, j, ret = 0;
1502	struct wl12xx_rx_filter *filter;
1503	u16 offset;
1504	u8 flags, len;
1505
1506	filter = wl1271_rx_filter_alloc();
1507	if (!filter) {
1508		wl1271_warning("Failed to alloc rx filter");
1509		ret = -ENOMEM;
1510		goto err;
1511	}
1512
1513	i = 0;
1514	while (i < p->pattern_len) {
1515		if (!test_bit(i, (unsigned long *)p->mask)) {
1516			i++;
1517			continue;
1518		}
1519
1520		for (j = i; j < p->pattern_len; j++) {
1521			if (!test_bit(j, (unsigned long *)p->mask))
1522				break;
1523
1524			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1525			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1526				break;
1527		}
1528
1529		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1530			offset = i;
1531			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1532		} else {
1533			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1534			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1535		}
1536
1537		len = j - i;
1538
1539		ret = wl1271_rx_filter_alloc_field(filter,
1540						   offset,
1541						   flags,
1542						   &p->pattern[i], len);
1543		if (ret)
1544			goto err;
1545
1546		i = j;
1547	}
1548
1549	filter->action = FILTER_SIGNAL;
1550
1551	*f = filter;
1552	return 0;
1553
1554err:
1555	wl1271_rx_filter_free(filter);
1556	*f = NULL;
1557
1558	return ret;
1559}
1560
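/*
 * Program the FW RX filters from the WoWLAN configuration. The sequence
 * below is: validate every pattern first, reset the default filter action
 * to FILTER_SIGNAL and clear all filters, install one filter per pattern
 * (each with action FILTER_SIGNAL), and finally set the default action to
 * FILTER_DROP so that, while suspended, only frames matching a pattern are
 * passed up (and can wake the host).
 */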
1561static int wl1271_configure_wowlan(struct wl1271 *wl,
1562				   struct cfg80211_wowlan *wow)
1563{
1564	int i, ret;
1565
1566	if (!wow || wow->any || !wow->n_patterns) {
1567		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1568							  FILTER_SIGNAL);
1569		if (ret)
1570			goto out;
1571
1572		ret = wl1271_rx_filter_clear_all(wl);
1573		if (ret)
1574			goto out;
1575
1576		return 0;
1577	}
1578
1579	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1580		return -EINVAL;
1581
1582	/* Validate all incoming patterns before clearing current FW state */
1583	for (i = 0; i < wow->n_patterns; i++) {
1584		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1585		if (ret) {
1586			wl1271_warning("Bad wowlan pattern %d", i);
1587			return ret;
1588		}
1589	}
1590
1591	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1592	if (ret)
1593		goto out;
1594
1595	ret = wl1271_rx_filter_clear_all(wl);
1596	if (ret)
1597		goto out;
1598
1599	/* Translate WoWLAN patterns into filters */
1600	for (i = 0; i < wow->n_patterns; i++) {
1601		struct cfg80211_pkt_pattern *p;
1602		struct wl12xx_rx_filter *filter = NULL;
1603
1604		p = &wow->patterns[i];
1605
1606		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1607		if (ret) {
1608			wl1271_warning("Failed to create an RX filter from "
1609				       "wowlan pattern %d", i);
1610			goto out;
1611		}
1612
1613		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1614
1615		wl1271_rx_filter_free(filter);
1616		if (ret)
1617			goto out;
1618	}
1619
1620	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1621
1622out:
1623	return ret;
1624}
1625
1626static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1627					struct wl12xx_vif *wlvif,
1628					struct cfg80211_wowlan *wow)
1629{
1630	int ret = 0;
1631
1632	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1633		goto out;
1634
1635	ret = wl1271_configure_wowlan(wl, wow);
1636	if (ret < 0)
1637		goto out;
1638
1639	if ((wl->conf.conn.suspend_wake_up_event ==
1640	     wl->conf.conn.wake_up_event) &&
1641	    (wl->conf.conn.suspend_listen_interval ==
1642	     wl->conf.conn.listen_interval))
1643		goto out;
1644
1645	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1646				    wl->conf.conn.suspend_wake_up_event,
1647				    wl->conf.conn.suspend_listen_interval);
1648
1649	if (ret < 0)
1650		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1651out:
1652	return ret;
1653
1654}
1655
1656static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1657					struct wl12xx_vif *wlvif,
1658					struct cfg80211_wowlan *wow)
1659{
1660	int ret = 0;
1661
1662	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1663		goto out;
1664
1665	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1666	if (ret < 0)
1667		goto out;
1668
1669	ret = wl1271_configure_wowlan(wl, wow);
1670	if (ret < 0)
1671		goto out;
1672
1673out:
1674	return ret;
1675
1676}
1677
1678static int wl1271_configure_suspend(struct wl1271 *wl,
1679				    struct wl12xx_vif *wlvif,
1680				    struct cfg80211_wowlan *wow)
1681{
1682	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1683		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1684	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1685		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1686	return 0;
1687}
1688
1689static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1690{
1691	int ret = 0;
1692	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1693	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1694
1695	if ((!is_ap) && (!is_sta))
1696		return;
1697
1698	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1699	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1700		return;
1701
1702	wl1271_configure_wowlan(wl, NULL);
1703
1704	if (is_sta) {
1705		if ((wl->conf.conn.suspend_wake_up_event ==
1706		     wl->conf.conn.wake_up_event) &&
1707		    (wl->conf.conn.suspend_listen_interval ==
1708		     wl->conf.conn.listen_interval))
1709			return;
1710
1711		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1712				    wl->conf.conn.wake_up_event,
1713				    wl->conf.conn.listen_interval);
1714
1715		if (ret < 0)
1716			wl1271_error("resume: wake up conditions failed: %d",
1717				     ret);
1718
1719	} else if (is_ap) {
1720		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1721	}
1722}
1723
1724static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1725					    struct cfg80211_wowlan *wow)
1726{
1727	struct wl1271 *wl = hw->priv;
1728	struct wl12xx_vif *wlvif;
1729	unsigned long flags;
1730	int ret;
1731
1732	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1733	WARN_ON(!wow);
1734
1735	/* we want to perform the recovery before suspending */
1736	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1737		wl1271_warning("postponing suspend to perform recovery");
1738		return -EBUSY;
1739	}
1740
1741	wl1271_tx_flush(wl);
1742
1743	mutex_lock(&wl->mutex);
1744
1745	ret = pm_runtime_get_sync(wl->dev);
1746	if (ret < 0) {
1747		pm_runtime_put_noidle(wl->dev);
1748		mutex_unlock(&wl->mutex);
1749		return ret;
1750	}
1751
1752	wl->wow_enabled = true;
1753	wl12xx_for_each_wlvif(wl, wlvif) {
1754		if (wlcore_is_p2p_mgmt(wlvif))
1755			continue;
1756
1757		ret = wl1271_configure_suspend(wl, wlvif, wow);
1758		if (ret < 0) {
1759			goto out_sleep;
1760		}
1761	}
1762
1763	/* disable fast link flow control notifications from FW */
1764	ret = wlcore_hw_interrupt_notify(wl, false);
1765	if (ret < 0)
1766		goto out_sleep;
1767
1768	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1769	ret = wlcore_hw_rx_ba_filter(wl,
1770				     !!wl->conf.conn.suspend_rx_ba_activity);
1771	if (ret < 0)
1772		goto out_sleep;
1773
1774out_sleep:
1775	pm_runtime_put_noidle(wl->dev);
1776	mutex_unlock(&wl->mutex);
1777
1778	if (ret < 0) {
1779		wl1271_warning("couldn't prepare device to suspend");
1780		return ret;
1781	}
1782
1783	/* flush any remaining work */
1784	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1785
1786	flush_work(&wl->tx_work);
1787
1788	/*
 1789	 * Cancel the watchdog even if the above tx_flush failed. We will detect
1790	 * it on resume anyway.
1791	 */
1792	cancel_delayed_work(&wl->tx_watchdog_work);
1793
1794	/*
1795	 * set suspended flag to avoid triggering a new threaded_irq
1796	 * work.
1797	 */
1798	spin_lock_irqsave(&wl->wl_lock, flags);
1799	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1800	spin_unlock_irqrestore(&wl->wl_lock, flags);
1801
1802	return pm_runtime_force_suspend(wl->dev);
1803}
1804
1805static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1806{
1807	struct wl1271 *wl = hw->priv;
1808	struct wl12xx_vif *wlvif;
1809	unsigned long flags;
1810	bool run_irq_work = false, pending_recovery;
1811	int ret;
1812
1813	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1814		     wl->wow_enabled);
1815	WARN_ON(!wl->wow_enabled);
1816
1817	ret = pm_runtime_force_resume(wl->dev);
1818	if (ret < 0) {
1819		wl1271_error("ELP wakeup failure!");
1820		goto out_sleep;
1821	}
1822
1823	/*
1824	 * re-enable irq_work enqueuing, and call irq_work directly if
1825	 * there is a pending work.
1826	 */
1827	spin_lock_irqsave(&wl->wl_lock, flags);
1828	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1829	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1830		run_irq_work = true;
1831	spin_unlock_irqrestore(&wl->wl_lock, flags);
1832
1833	mutex_lock(&wl->mutex);
1834
1835	/* test the recovery flag before calling any SDIO functions */
1836	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1837				    &wl->flags);
1838
1839	if (run_irq_work) {
1840		wl1271_debug(DEBUG_MAC80211,
1841			     "run postponed irq_work directly");
1842
1843		/* don't talk to the HW if recovery is pending */
1844		if (!pending_recovery) {
1845			ret = wlcore_irq_locked(wl);
1846			if (ret)
1847				wl12xx_queue_recovery_work(wl);
1848		}
1849
1850		wlcore_enable_interrupts(wl);
1851	}
1852
1853	if (pending_recovery) {
1854		wl1271_warning("queuing forgotten recovery on resume");
1855		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1856		goto out_sleep;
1857	}
1858
1859	ret = pm_runtime_get_sync(wl->dev);
1860	if (ret < 0) {
1861		pm_runtime_put_noidle(wl->dev);
1862		goto out;
1863	}
1864
1865	wl12xx_for_each_wlvif(wl, wlvif) {
1866		if (wlcore_is_p2p_mgmt(wlvif))
1867			continue;
1868
1869		wl1271_configure_resume(wl, wlvif);
1870	}
1871
1872	ret = wlcore_hw_interrupt_notify(wl, true);
1873	if (ret < 0)
1874		goto out_sleep;
1875
 1876	/* on resume, restore normal RX BA session handling (filter disabled) */
1877	ret = wlcore_hw_rx_ba_filter(wl, false);
1878	if (ret < 0)
1879		goto out_sleep;
1880
1881out_sleep:
1882	pm_runtime_mark_last_busy(wl->dev);
1883	pm_runtime_put_autosuspend(wl->dev);
1884
1885out:
1886	wl->wow_enabled = false;
1887
1888	/*
1889	 * Set a flag to re-init the watchdog on the first Tx after resume.
1890	 * That way we avoid possible conditions where Tx-complete interrupts
1891	 * fail to arrive and we perform a spurious recovery.
1892	 */
1893	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1894	mutex_unlock(&wl->mutex);
1895
1896	return 0;
1897}
1898
1899static int wl1271_op_start(struct ieee80211_hw *hw)
1900{
1901	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1902
1903	/*
1904	 * We have to delay the booting of the hardware because
1905	 * we need to know the local MAC address before downloading and
1906	 * initializing the firmware. The MAC address cannot be changed
1907	 * after boot, and without the proper MAC address, the firmware
1908	 * will not function properly.
1909	 *
1910	 * The MAC address is first known when the corresponding interface
1911	 * is added. That is where we will initialize the hardware.
1912	 */
1913
1914	return 0;
1915}
1916
1917static void wlcore_op_stop_locked(struct wl1271 *wl)
1918{
1919	int i;
1920
1921	if (wl->state == WLCORE_STATE_OFF) {
1922		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1923					&wl->flags))
1924			wlcore_enable_interrupts(wl);
1925
1926		return;
1927	}
1928
1929	/*
1930	 * this must be before the cancel_work calls below, so that the work
1931	 * functions don't perform further work.
1932	 */
1933	wl->state = WLCORE_STATE_OFF;
1934
1935	/*
1936	 * Use the nosync variant to disable interrupts, so the mutex could be
1937	 * held while doing so without deadlocking.
1938	 */
1939	wlcore_disable_interrupts_nosync(wl);
1940
1941	mutex_unlock(&wl->mutex);
1942
1943	wlcore_synchronize_interrupts(wl);
1944	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1945		cancel_work_sync(&wl->recovery_work);
1946	wl1271_flush_deferred_work(wl);
1947	cancel_delayed_work_sync(&wl->scan_complete_work);
1948	cancel_work_sync(&wl->netstack_work);
1949	cancel_work_sync(&wl->tx_work);
1950	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1951
1952	/* let's notify MAC80211 about the remaining pending TX frames */
1953	mutex_lock(&wl->mutex);
1954	wl12xx_tx_reset(wl);
1955
1956	wl1271_power_off(wl);
1957	/*
1958	 * In case a recovery was scheduled, interrupts were disabled to avoid
1959	 * an interrupt storm. Now that the power is down, it is safe to
1960	 * re-enable interrupts to balance the disable depth
1961	 */
1962	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1963		wlcore_enable_interrupts(wl);
1964
1965	wl->band = NL80211_BAND_2GHZ;
1966
1967	wl->rx_counter = 0;
1968	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1969	wl->channel_type = NL80211_CHAN_NO_HT;
1970	wl->tx_blocks_available = 0;
1971	wl->tx_allocated_blocks = 0;
1972	wl->tx_results_count = 0;
1973	wl->tx_packets_count = 0;
1974	wl->time_offset = 0;
1975	wl->ap_fw_ps_map = 0;
1976	wl->ap_ps_map = 0;
1977	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1978	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1979	memset(wl->links_map, 0, sizeof(wl->links_map));
1980	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1981	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1982	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1983	wl->active_sta_count = 0;
1984	wl->active_link_count = 0;
1985
1986	/* The system link is always allocated */
1987	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1988	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1989	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1990
1991	/*
1992	 * this is performed after the cancel_work calls and the associated
1993	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1994	 * get executed before all these vars have been reset.
1995	 */
1996	wl->flags = 0;
1997
1998	wl->tx_blocks_freed = 0;
1999
2000	for (i = 0; i < NUM_TX_QUEUES; i++) {
2001		wl->tx_pkts_freed[i] = 0;
2002		wl->tx_allocated_pkts[i] = 0;
2003	}
2004
2005	wl1271_debugfs_reset(wl);
2006
2007	kfree(wl->raw_fw_status);
2008	wl->raw_fw_status = NULL;
2009	kfree(wl->fw_status);
2010	wl->fw_status = NULL;
2011	kfree(wl->tx_res_if);
2012	wl->tx_res_if = NULL;
2013	kfree(wl->target_mem_map);
2014	wl->target_mem_map = NULL;
2015
2016	/*
2017	 * FW channels must be re-calibrated after recovery,
2018	 * save current Reg-Domain channel configuration and clear it.
2019	 */
2020	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2021	       sizeof(wl->reg_ch_conf_pending));
2022	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2023}
2024
2025static void wlcore_op_stop(struct ieee80211_hw *hw)
2026{
2027	struct wl1271 *wl = hw->priv;
2028
2029	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2030
2031	mutex_lock(&wl->mutex);
2032
2033	wlcore_op_stop_locked(wl);
2034
2035	mutex_unlock(&wl->mutex);
2036}
2037
2038static void wlcore_channel_switch_work(struct work_struct *work)
2039{
2040	struct delayed_work *dwork;
2041	struct wl1271 *wl;
2042	struct ieee80211_vif *vif;
2043	struct wl12xx_vif *wlvif;
2044	int ret;
2045
2046	dwork = to_delayed_work(work);
2047	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2048	wl = wlvif->wl;
2049
2050	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2051
2052	mutex_lock(&wl->mutex);
2053
2054	if (unlikely(wl->state != WLCORE_STATE_ON))
2055		goto out;
2056
2057	/* check the channel switch is still ongoing */
2058	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2059		goto out;
2060
2061	vif = wl12xx_wlvif_to_vif(wlvif);
2062	ieee80211_chswitch_done(vif, false);
2063
2064	ret = pm_runtime_get_sync(wl->dev);
2065	if (ret < 0) {
2066		pm_runtime_put_noidle(wl->dev);
2067		goto out;
2068	}
2069
2070	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2071
2072	pm_runtime_mark_last_busy(wl->dev);
2073	pm_runtime_put_autosuspend(wl->dev);
2074out:
2075	mutex_unlock(&wl->mutex);
2076}
2077
2078static void wlcore_connection_loss_work(struct work_struct *work)
2079{
2080	struct delayed_work *dwork;
2081	struct wl1271 *wl;
2082	struct ieee80211_vif *vif;
2083	struct wl12xx_vif *wlvif;
2084
2085	dwork = to_delayed_work(work);
2086	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2087	wl = wlvif->wl;
2088
2089	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2090
2091	mutex_lock(&wl->mutex);
2092
2093	if (unlikely(wl->state != WLCORE_STATE_ON))
2094		goto out;
2095
2096	/* Call mac80211 connection loss */
2097	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2098		goto out;
2099
2100	vif = wl12xx_wlvif_to_vif(wlvif);
2101	ieee80211_connection_loss(vif);
2102out:
2103	mutex_unlock(&wl->mutex);
2104}
2105
2106static void wlcore_pending_auth_complete_work(struct work_struct *work)
2107{
2108	struct delayed_work *dwork;
2109	struct wl1271 *wl;
2110	struct wl12xx_vif *wlvif;
2111	unsigned long time_spare;
2112	int ret;
2113
2114	dwork = to_delayed_work(work);
2115	wlvif = container_of(dwork, struct wl12xx_vif,
2116			     pending_auth_complete_work);
2117	wl = wlvif->wl;
2118
2119	mutex_lock(&wl->mutex);
2120
2121	if (unlikely(wl->state != WLCORE_STATE_ON))
2122		goto out;
2123
2124	/*
2125	 * Make sure a second really passed since the last auth reply. Maybe
2126	 * a second auth reply arrived while we were stuck on the mutex.
2127	 * Check for a little less than the timeout to protect from scheduler
2128	 * irregularities.
2129	 */
2130	time_spare = jiffies +
2131			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2132	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2133		goto out;
2134
2135	ret = pm_runtime_get_sync(wl->dev);
2136	if (ret < 0) {
2137		pm_runtime_put_noidle(wl->dev);
2138		goto out;
2139	}
2140
2141	/* cancel the ROC if active */
2142	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2143
2144	pm_runtime_mark_last_busy(wl->dev);
2145	pm_runtime_put_autosuspend(wl->dev);
2146out:
2147	mutex_unlock(&wl->mutex);
2148}
2149
2150static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2151{
2152	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2153					WL12XX_MAX_RATE_POLICIES);
2154	if (policy >= WL12XX_MAX_RATE_POLICIES)
2155		return -EBUSY;
2156
2157	__set_bit(policy, wl->rate_policies_map);
2158	*idx = policy;
2159	return 0;
2160}
2161
2162static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2163{
2164	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2165		return;
2166
2167	__clear_bit(*idx, wl->rate_policies_map);
2168	*idx = WL12XX_MAX_RATE_POLICIES;
2169}
2170
2171static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2172{
2173	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2174					WLCORE_MAX_KLV_TEMPLATES);
2175	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2176		return -EBUSY;
2177
2178	__set_bit(policy, wl->klv_templates_map);
2179	*idx = policy;
2180	return 0;
2181}
2182
2183static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2184{
2185	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2186		return;
2187
2188	__clear_bit(*idx, wl->klv_templates_map);
2189	*idx = WLCORE_MAX_KLV_TEMPLATES;
2190}
2191
2192static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2193{
2194	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2195
2196	switch (wlvif->bss_type) {
2197	case BSS_TYPE_AP_BSS:
2198		if (wlvif->p2p)
2199			return WL1271_ROLE_P2P_GO;
2200		else if (ieee80211_vif_is_mesh(vif))
2201			return WL1271_ROLE_MESH_POINT;
2202		else
2203			return WL1271_ROLE_AP;
2204
2205	case BSS_TYPE_STA_BSS:
2206		if (wlvif->p2p)
2207			return WL1271_ROLE_P2P_CL;
2208		else
2209			return WL1271_ROLE_STA;
2210
2211	case BSS_TYPE_IBSS:
2212		return WL1271_ROLE_IBSS;
2213
2214	default:
2215		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2216	}
2217	return WL12XX_INVALID_ROLE_TYPE;
2218}
2219
2220static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2221{
2222	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2223	int i;
2224
2225	/* clear everything but the persistent data */
2226	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2227
2228	switch (ieee80211_vif_type_p2p(vif)) {
2229	case NL80211_IFTYPE_P2P_CLIENT:
2230		wlvif->p2p = 1;
2231		/* fall-through */
2232	case NL80211_IFTYPE_STATION:
2233	case NL80211_IFTYPE_P2P_DEVICE:
2234		wlvif->bss_type = BSS_TYPE_STA_BSS;
2235		break;
2236	case NL80211_IFTYPE_ADHOC:
2237		wlvif->bss_type = BSS_TYPE_IBSS;
2238		break;
2239	case NL80211_IFTYPE_P2P_GO:
2240		wlvif->p2p = 1;
2241		/* fall-through */
2242	case NL80211_IFTYPE_AP:
2243	case NL80211_IFTYPE_MESH_POINT:
2244		wlvif->bss_type = BSS_TYPE_AP_BSS;
2245		break;
2246	default:
2247		wlvif->bss_type = MAX_BSS_TYPE;
2248		return -EOPNOTSUPP;
2249	}
2250
2251	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2252	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2253	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2254
2255	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2256	    wlvif->bss_type == BSS_TYPE_IBSS) {
2257		/* init sta/ibss data */
2258		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2259		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2260		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2261		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2262		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2263		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2264		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2265		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2266	} else {
2267		/* init ap data */
2268		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2269		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2270		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2271		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2272		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2273			wl12xx_allocate_rate_policy(wl,
2274						&wlvif->ap.ucast_rate_idx[i]);
2275		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2276		/*
2277		 * TODO: check if basic_rate shouldn't be
2278		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2279		 * instead (the same thing for STA above).
2280		*/
2281		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2282		/* TODO: this seems to be used only for STA, check it */
2283		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2284	}
2285
2286	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2287	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2288	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2289
2290	/*
2291	 * mac80211 configures some values globally, while we treat them
2292	 * per-interface. thus, on init, we have to copy them from wl
2293	 */
2294	wlvif->band = wl->band;
2295	wlvif->channel = wl->channel;
2296	wlvif->power_level = wl->power_level;
2297	wlvif->channel_type = wl->channel_type;
2298
2299	INIT_WORK(&wlvif->rx_streaming_enable_work,
2300		  wl1271_rx_streaming_enable_work);
2301	INIT_WORK(&wlvif->rx_streaming_disable_work,
2302		  wl1271_rx_streaming_disable_work);
2303	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2304	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2305			  wlcore_channel_switch_work);
2306	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2307			  wlcore_connection_loss_work);
2308	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2309			  wlcore_pending_auth_complete_work);
2310	INIT_LIST_HEAD(&wlvif->list);
2311
2312	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2313	return 0;
2314}
2315
2316static int wl12xx_init_fw(struct wl1271 *wl)
2317{
2318	int retries = WL1271_BOOT_RETRIES;
2319	bool booted = false;
2320	struct wiphy *wiphy = wl->hw->wiphy;
2321	int ret;
2322
2323	while (retries) {
2324		retries--;
2325		ret = wl12xx_chip_wakeup(wl, false);
2326		if (ret < 0)
2327			goto power_off;
2328
2329		ret = wl->ops->boot(wl);
2330		if (ret < 0)
2331			goto power_off;
2332
2333		ret = wl1271_hw_init(wl);
2334		if (ret < 0)
2335			goto irq_disable;
2336
2337		booted = true;
2338		break;
2339
2340irq_disable:
2341		mutex_unlock(&wl->mutex);
2342		/* Unlocking the mutex in the middle of handling is
2343		   inherently unsafe. In this case we deem it safe to do,
2344		   because we need to let any possibly pending IRQ out of
2345		   the system (and while we are WLCORE_STATE_OFF the IRQ
2346		   work function will not do anything.) Also, any other
2347		   possible concurrent operations will fail due to the
2348		   current state, hence the wl1271 struct should be safe. */
2349		wlcore_disable_interrupts(wl);
2350		wl1271_flush_deferred_work(wl);
2351		cancel_work_sync(&wl->netstack_work);
2352		mutex_lock(&wl->mutex);
2353power_off:
2354		wl1271_power_off(wl);
2355	}
2356
2357	if (!booted) {
2358		wl1271_error("firmware boot failed despite %d retries",
2359			     WL1271_BOOT_RETRIES);
2360		goto out;
2361	}
2362
2363	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2364
2365	/* update hw/fw version info in wiphy struct */
2366	wiphy->hw_version = wl->chip.id;
2367	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2368		sizeof(wiphy->fw_version));
2369
2370	/*
2371	 * Now we know if 11a is supported (info from the NVS), so disable
2372	 * 11a channels if not supported
2373	 */
2374	if (!wl->enable_11a)
2375		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2376
2377	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2378		     wl->enable_11a ? "" : "not ");
2379
2380	wl->state = WLCORE_STATE_ON;
2381out:
2382	return ret;
2383}
2384
2385static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2386{
2387	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2388}
2389
2390/*
2391 * Check whether a fw switch (i.e. moving from one loaded
2392 * fw to another) is needed. This function is also responsible
2393 * for updating wl->last_vif_count, so it must be called before
2394 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2395 * will be used).
2396 */
2397static bool wl12xx_need_fw_change(struct wl1271 *wl,
2398				  struct vif_counter_data vif_counter_data,
2399				  bool add)
2400{
2401	enum wl12xx_fw_type current_fw = wl->fw_type;
2402	u8 vif_count = vif_counter_data.counter;
2403
2404	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2405		return false;
2406
2407	/* increase the vif count if this is a new vif */
2408	if (add && !vif_counter_data.cur_vif_running)
2409		vif_count++;
2410
2411	wl->last_vif_count = vif_count;
2412
2413	/* no need for fw change if the device is OFF */
2414	if (wl->state == WLCORE_STATE_OFF)
2415		return false;
2416
2417	/* no need for fw change if a single fw is used */
2418	if (!wl->mr_fw_name)
2419		return false;
2420
2421	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2422		return true;
2423	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2424		return true;
2425
2426	return false;
2427}
2428
2429/*
2430 * Enter "forced psm". Make sure the sta is in psm against the ap,
2431 * to make the fw switch a bit more disconnection-persistent.
2432 */
2433static void wl12xx_force_active_psm(struct wl1271 *wl)
2434{
2435	struct wl12xx_vif *wlvif;
2436
2437	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2438		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2439	}
2440}
2441
2442struct wlcore_hw_queue_iter_data {
2443	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2444	/* current vif */
2445	struct ieee80211_vif *vif;
2446	/* is the current vif among those iterated */
2447	bool cur_running;
2448};
2449
2450static void wlcore_hw_queue_iter(void *data, u8 *mac,
2451				 struct ieee80211_vif *vif)
2452{
2453	struct wlcore_hw_queue_iter_data *iter_data = data;
2454
2455	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2456	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2457		return;
2458
2459	if (iter_data->cur_running || vif == iter_data->vif) {
2460		iter_data->cur_running = true;
2461		return;
2462	}
2463
2464	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2465}
2466
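/*
 * Pick a mac80211 hw-queue base for a new interface. Each interface gets a
 * block of NUM_TX_QUEUES consecutive queues (hw_queue_base = slot *
 * NUM_TX_QUEUES), chosen as the first slot not already taken by a running
 * interface; the cab (content-after-beacon) queues for AP interfaces live
 * past the last block, at NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES + slot.
 */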
2467static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2468					 struct wl12xx_vif *wlvif)
2469{
2470	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2471	struct wlcore_hw_queue_iter_data iter_data = {};
2472	int i, q_base;
2473
2474	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2475		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2476		return 0;
2477	}
2478
2479	iter_data.vif = vif;
2480
2481	/* mark all bits taken by active interfaces */
2482	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2483					IEEE80211_IFACE_ITER_RESUME_ALL,
2484					wlcore_hw_queue_iter, &iter_data);
2485
2486	/* the current vif is already running in mac80211 (resume/recovery) */
2487	if (iter_data.cur_running) {
2488		wlvif->hw_queue_base = vif->hw_queue[0];
2489		wl1271_debug(DEBUG_MAC80211,
2490			     "using pre-allocated hw queue base %d",
2491			     wlvif->hw_queue_base);
2492
 2493		/* the interface might have changed type */
2494		goto adjust_cab_queue;
2495	}
2496
2497	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2498				     WLCORE_NUM_MAC_ADDRESSES);
2499	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2500		return -EBUSY;
2501
2502	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2503	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2504		     wlvif->hw_queue_base);
2505
2506	for (i = 0; i < NUM_TX_QUEUES; i++) {
2507		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2508		/* register hw queues in mac80211 */
2509		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2510	}
2511
2512adjust_cab_queue:
2513	/* the last places are reserved for cab queues per interface */
2514	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2515		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2516				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2517	else
2518		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2519
2520	return 0;
2521}
2522
2523static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2524				   struct ieee80211_vif *vif)
2525{
2526	struct wl1271 *wl = hw->priv;
2527	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2528	struct vif_counter_data vif_count;
2529	int ret = 0;
2530	u8 role_type;
2531
2532	if (wl->plt) {
2533		wl1271_error("Adding Interface not allowed while in PLT mode");
2534		return -EBUSY;
2535	}
2536
2537	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2538			     IEEE80211_VIF_SUPPORTS_UAPSD |
2539			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2540
2541	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2542		     ieee80211_vif_type_p2p(vif), vif->addr);
2543
2544	wl12xx_get_vif_count(hw, vif, &vif_count);
2545
2546	mutex_lock(&wl->mutex);
2547
2548	/*
 2549	 * in some very rare corner-case HW recovery scenarios it's possible to
2550	 * get here before __wl1271_op_remove_interface is complete, so
2551	 * opt out if that is the case.
2552	 */
2553	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2554	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2555		ret = -EBUSY;
2556		goto out;
2557	}
2558
2559
2560	ret = wl12xx_init_vif_data(wl, vif);
2561	if (ret < 0)
2562		goto out;
2563
2564	wlvif->wl = wl;
2565	role_type = wl12xx_get_role_type(wl, wlvif);
2566	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2567		ret = -EINVAL;
2568		goto out;
2569	}
2570
2571	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2572	if (ret < 0)
2573		goto out;
2574
2575	/*
2576	 * TODO: after the nvs issue will be solved, move this block
2577	 * to start(), and make sure here the driver is ON.
2578	 */
2579	if (wl->state == WLCORE_STATE_OFF) {
2580		/*
2581		 * we still need this in order to configure the fw
2582		 * while uploading the nvs
2583		 */
2584		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2585
2586		ret = wl12xx_init_fw(wl);
2587		if (ret < 0)
2588			goto out;
2589	}
2590
2591	/*
2592	 * Call runtime PM only after possible wl12xx_init_fw() above
2593	 * is done. Otherwise we do not have interrupts enabled.
2594	 */
2595	ret = pm_runtime_get_sync(wl->dev);
2596	if (ret < 0) {
2597		pm_runtime_put_noidle(wl->dev);
2598		goto out_unlock;
2599	}
2600
2601	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2602		wl12xx_force_active_psm(wl);
2603		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2604		mutex_unlock(&wl->mutex);
2605		wl1271_recovery_work(&wl->recovery_work);
2606		return 0;
2607	}
2608
2609	if (!wlcore_is_p2p_mgmt(wlvif)) {
2610		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2611					     role_type, &wlvif->role_id);
2612		if (ret < 0)
2613			goto out;
2614
2615		ret = wl1271_init_vif_specific(wl, vif);
2616		if (ret < 0)
2617			goto out;
2618
2619	} else {
2620		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2621					     &wlvif->dev_role_id);
2622		if (ret < 0)
2623			goto out;
2624
2625		/* needed mainly for configuring rate policies */
2626		ret = wl1271_sta_hw_init(wl, wlvif);
2627		if (ret < 0)
2628			goto out;
2629	}
2630
2631	list_add(&wlvif->list, &wl->wlvif_list);
2632	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2633
2634	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2635		wl->ap_count++;
2636	else
2637		wl->sta_count++;
2638out:
2639	pm_runtime_mark_last_busy(wl->dev);
2640	pm_runtime_put_autosuspend(wl->dev);
2641out_unlock:
2642	mutex_unlock(&wl->mutex);
2643
2644	return ret;
2645}
2646
2647static void __wl1271_op_remove_interface(struct wl1271 *wl,
2648					 struct ieee80211_vif *vif,
2649					 bool reset_tx_queues)
2650{
2651	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2652	int i, ret;
2653	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2654
2655	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2656
2657	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2658		return;
2659
2660	/* because of hardware recovery, we may get here twice */
2661	if (wl->state == WLCORE_STATE_OFF)
2662		return;
2663
2664	wl1271_info("down");
2665
2666	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2667	    wl->scan_wlvif == wlvif) {
2668		struct cfg80211_scan_info info = {
2669			.aborted = true,
2670		};
2671
2672		/*
2673		 * Rearm the tx watchdog just before idling scan. This
2674		 * prevents just-finished scans from triggering the watchdog
2675		 */
2676		wl12xx_rearm_tx_watchdog_locked(wl);
2677
2678		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2679		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2680		wl->scan_wlvif = NULL;
2681		wl->scan.req = NULL;
2682		ieee80211_scan_completed(wl->hw, &info);
2683	}
2684
2685	if (wl->sched_vif == wlvif)
2686		wl->sched_vif = NULL;
2687
2688	if (wl->roc_vif == vif) {
2689		wl->roc_vif = NULL;
2690		ieee80211_remain_on_channel_expired(wl->hw);
2691	}
2692
2693	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2694		/* disable active roles */
2695		ret = pm_runtime_get_sync(wl->dev);
2696		if (ret < 0) {
2697			pm_runtime_put_noidle(wl->dev);
2698			goto deinit;
2699		}
2700
2701		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2702		    wlvif->bss_type == BSS_TYPE_IBSS) {
2703			if (wl12xx_dev_role_started(wlvif))
2704				wl12xx_stop_dev(wl, wlvif);
2705		}
2706
2707		if (!wlcore_is_p2p_mgmt(wlvif)) {
2708			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2709			if (ret < 0) {
2710				pm_runtime_put_noidle(wl->dev);
2711				goto deinit;
2712			}
2713		} else {
2714			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2715			if (ret < 0) {
2716				pm_runtime_put_noidle(wl->dev);
2717				goto deinit;
2718			}
2719		}
2720
2721		pm_runtime_mark_last_busy(wl->dev);
2722		pm_runtime_put_autosuspend(wl->dev);
2723	}
2724deinit:
2725	wl12xx_tx_reset_wlvif(wl, wlvif);
2726
2727	/* clear all hlids (except system_hlid) */
2728	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2729
2730	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2731	    wlvif->bss_type == BSS_TYPE_IBSS) {
2732		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2733		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2734		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2735		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2736		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2737	} else {
2738		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2739		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2740		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2741		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2742		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2743			wl12xx_free_rate_policy(wl,
2744						&wlvif->ap.ucast_rate_idx[i]);
2745		wl1271_free_ap_keys(wl, wlvif);
2746	}
2747
2748	dev_kfree_skb(wlvif->probereq);
2749	wlvif->probereq = NULL;
2750	if (wl->last_wlvif == wlvif)
2751		wl->last_wlvif = NULL;
2752	list_del(&wlvif->list);
2753	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2754	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2755	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2756
2757	if (is_ap)
2758		wl->ap_count--;
2759	else
2760		wl->sta_count--;
2761
2762	/*
 2763	 * If the last AP went down but stations remain, configure sleep auth
 2764	 * according to the STA setting. Don't do this on unintended recovery.
2765	 */
2766	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2767	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2768		goto unlock;
2769
2770	if (wl->ap_count == 0 && is_ap) {
2771		/* mask ap events */
2772		wl->event_mask &= ~wl->ap_event_mask;
2773		wl1271_event_unmask(wl);
2774	}
2775
2776	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2777		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2778		/* Configure for power according to debugfs */
2779		if (sta_auth != WL1271_PSM_ILLEGAL)
2780			wl1271_acx_sleep_auth(wl, sta_auth);
2781		/* Configure for ELP power saving */
2782		else
2783			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2784	}
2785
2786unlock:
2787	mutex_unlock(&wl->mutex);
2788
2789	del_timer_sync(&wlvif->rx_streaming_timer);
2790	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2791	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2792	cancel_work_sync(&wlvif->rc_update_work);
2793	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2794	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2795	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2796
2797	mutex_lock(&wl->mutex);
2798}
2799
2800static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2801				       struct ieee80211_vif *vif)
2802{
2803	struct wl1271 *wl = hw->priv;
2804	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2805	struct wl12xx_vif *iter;
2806	struct vif_counter_data vif_count;
2807
2808	wl12xx_get_vif_count(hw, vif, &vif_count);
2809	mutex_lock(&wl->mutex);
2810
2811	if (wl->state == WLCORE_STATE_OFF ||
2812	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2813		goto out;
2814
2815	/*
2816	 * wl->vif can be null here if someone shuts down the interface
2817	 * just when hardware recovery has been started.
2818	 */
2819	wl12xx_for_each_wlvif(wl, iter) {
2820		if (iter != wlvif)
2821			continue;
2822
2823		__wl1271_op_remove_interface(wl, vif, true);
2824		break;
2825	}
2826	WARN_ON(iter != wlvif);
2827	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2828		wl12xx_force_active_psm(wl);
2829		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2830		wl12xx_queue_recovery_work(wl);
2831	}
2832out:
2833	mutex_unlock(&wl->mutex);
2834}
2835
2836static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2837				      struct ieee80211_vif *vif,
2838				      enum nl80211_iftype new_type, bool p2p)
2839{
2840	struct wl1271 *wl = hw->priv;
2841	int ret;
2842
2843	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2844	wl1271_op_remove_interface(hw, vif);
2845
2846	vif->type = new_type;
2847	vif->p2p = p2p;
2848	ret = wl1271_op_add_interface(hw, vif);
2849
2850	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2851	return ret;
2852}
2853
2854static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2855{
2856	int ret;
2857	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2858
2859	/*
2860	 * One of the side effects of the JOIN command is that it clears
2861	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2862	 * to a WPA/WPA2 access point will therefore kill the data-path.
2863	 * Currently the only valid scenario for JOIN during association
2864	 * is on roaming, in which case we will also be given new keys.
2865	 * Keep the below message for now, unless it starts bothering
2866	 * users who really like to roam a lot :)
2867	 */
2868	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2869		wl1271_info("JOIN while associated.");
2870
2871	/* clear encryption type */
2872	wlvif->encryption_type = KEY_NONE;
2873
2874	if (is_ibss)
2875		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2876	else {
2877		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2878			/*
2879			 * TODO: this is an ugly workaround for a wl12xx fw
2880			 * bug - we are not able to tx/rx after the first
2881			 * start_sta, so make dummy start+stop calls,
2882			 * and then call start_sta again.
2883			 * this should be fixed in the fw.
2884			 */
2885			wl12xx_cmd_role_start_sta(wl, wlvif);
2886			wl12xx_cmd_role_stop_sta(wl, wlvif);
2887		}
2888
2889		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2890	}
2891
2892	return ret;
2893}
2894
2895static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2896			    int offset)
2897{
2898	u8 ssid_len;
2899	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2900					 skb->len - offset);
2901
2902	if (!ptr) {
2903		wl1271_error("No SSID in IEs!");
2904		return -ENOENT;
2905	}
2906
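	/*
	 * IE layout: ptr[0] is the element id, ptr[1] the length and
	 * ptr[2..] the SSID octets.
	 */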
2907	ssid_len = ptr[1];
2908	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2909		wl1271_error("SSID is too long!");
2910		return -EINVAL;
2911	}
2912
2913	wlvif->ssid_len = ssid_len;
2914	memcpy(wlvif->ssid, ptr+2, ssid_len);
2915	return 0;
2916}
2917
2918static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2919{
2920	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2921	struct sk_buff *skb;
2922	int ieoffset;
2923
2924	/* we currently only support setting the ssid from the ap probe req */
2925	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2926		return -EINVAL;
2927
2928	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2929	if (!skb)
2930		return -EINVAL;
2931
2932	ieoffset = offsetof(struct ieee80211_mgmt,
2933			    u.probe_req.variable);
2934	wl1271_ssid_set(wlvif, skb, ieoffset);
2935	dev_kfree_skb(skb);
2936
2937	return 0;
2938}
2939
2940static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2941			    struct ieee80211_bss_conf *bss_conf,
2942			    u32 sta_rate_set)
2943{
2944	int ieoffset;
2945	int ret;
2946
2947	wlvif->aid = bss_conf->aid;
2948	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2949	wlvif->beacon_int = bss_conf->beacon_int;
2950	wlvif->wmm_enabled = bss_conf->qos;
2951
2952	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2953
2954	/*
2955	 * with wl1271, we don't need to update the
2956	 * beacon_int and dtim_period, because the firmware
2957	 * updates them by itself when the first beacon is
2958	 * received after a join.
2959	 */
2960	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2961	if (ret < 0)
2962		return ret;
2963
2964	/*
2965	 * Get a template for hardware connection maintenance
2966	 */
2967	dev_kfree_skb(wlvif->probereq);
2968	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2969							wlvif,
2970							NULL);
2971	ieoffset = offsetof(struct ieee80211_mgmt,
2972			    u.probe_req.variable);
2973	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2974
2975	/* enable the connection monitoring feature */
2976	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2977	if (ret < 0)
2978		return ret;
2979
2980	/*
2981	 * The join command disables the keep-alive mode, shuts down its process,
2982	 * and also clears the template config, so we need to reset it all after
2983	 * the join. The acx_aid starts the keep-alive process, and the order
2984	 * of the commands below is relevant.
2985	 */
2986	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2987	if (ret < 0)
2988		return ret;
2989
2990	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2991	if (ret < 0)
2992		return ret;
2993
2994	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2995	if (ret < 0)
2996		return ret;
2997
2998	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2999					   wlvif->sta.klv_template_id,
3000					   ACX_KEEP_ALIVE_TPL_VALID);
3001	if (ret < 0)
3002		return ret;
3003
3004	/*
3005	 * The default fw psm configuration is AUTO, while mac80211 default
3006	 * setting is off (ACTIVE), so sync the fw with the correct value.
3007	 */
3008	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3009	if (ret < 0)
3010		return ret;
3011
3012	if (sta_rate_set) {
3013		wlvif->rate_set =
3014			wl1271_tx_enabled_rates_get(wl,
3015						    sta_rate_set,
3016						    wlvif->band);
3017		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3018		if (ret < 0)
3019			return ret;
3020	}
3021
3022	return ret;
3023}
3024
3025static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3026{
3027	int ret;
3028	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3029
3030	/* make sure we are associated (sta) */
3031	if (sta &&
3032	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3033		return false;
3034
3035	/* make sure we are joined (ibss) */
3036	if (!sta &&
3037	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3038		return false;
3039
3040	if (sta) {
3041		/* use defaults when not associated */
3042		wlvif->aid = 0;
3043
3044		/* free probe-request template */
3045		dev_kfree_skb(wlvif->probereq);
3046		wlvif->probereq = NULL;
3047
3048		/* disable connection monitor features */
3049		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3050		if (ret < 0)
3051			return ret;
3052
3053		/* Disable the keep-alive feature */
3054		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3055		if (ret < 0)
3056			return ret;
3057
3058		/* disable beacon filtering */
3059		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3060		if (ret < 0)
3061			return ret;
3062	}
3063
3064	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3065		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3066
3067		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3068		ieee80211_chswitch_done(vif, false);
3069		cancel_delayed_work(&wlvif->channel_switch_work);
3070	}
3071
3072	/* invalidate keep-alive template */
3073	wl1271_acx_keep_alive_config(wl, wlvif,
3074				     wlvif->sta.klv_template_id,
3075				     ACX_KEEP_ALIVE_TPL_INVALID);
3076
3077	return 0;
3078}
3079
3080static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3081{
3082	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3083	wlvif->rate_set = wlvif->basic_rate_set;
3084}
3085
3086static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3087				   bool idle)
3088{
3089	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3090
3091	if (idle == cur_idle)
3092		return;
3093
3094	if (idle) {
3095		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3096	} else {
3097		/* The current firmware only supports sched_scan in idle */
3098		if (wl->sched_vif == wlvif)
3099			wl->ops->sched_scan_stop(wl, wlvif);
3100
3101		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3102	}
3103}
3104
3105static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3106			     struct ieee80211_conf *conf, u32 changed)
3107{
3108	int ret;
3109
3110	if (wlcore_is_p2p_mgmt(wlvif))
3111		return 0;
3112
3113	if (conf->power_level != wlvif->power_level) {
3114		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3115		if (ret < 0)
3116			return ret;
3117
3118		wlvif->power_level = conf->power_level;
3119	}
3120
3121	return 0;
3122}
3123
3124static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3125{
3126	struct wl1271 *wl = hw->priv;
3127	struct wl12xx_vif *wlvif;
3128	struct ieee80211_conf *conf = &hw->conf;
3129	int ret = 0;
3130
3131	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3132		     " changed 0x%x",
3133		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3134		     conf->power_level,
3135		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3136			 changed);
3137
3138	mutex_lock(&wl->mutex);
3139
3140	if (changed & IEEE80211_CONF_CHANGE_POWER)
3141		wl->power_level = conf->power_level;
3142
3143	if (unlikely(wl->state != WLCORE_STATE_ON))
3144		goto out;
3145
3146	ret = pm_runtime_get_sync(wl->dev);
3147	if (ret < 0) {
3148		pm_runtime_put_noidle(wl->dev);
3149		goto out;
3150	}
3151
3152	/* configure each interface */
3153	wl12xx_for_each_wlvif(wl, wlvif) {
3154		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3155		if (ret < 0)
3156			goto out_sleep;
3157	}
3158
3159out_sleep:
3160	pm_runtime_mark_last_busy(wl->dev);
3161	pm_runtime_put_autosuspend(wl->dev);
3162
3163out:
3164	mutex_unlock(&wl->mutex);
3165
3166	return ret;
3167}
3168
3169struct wl1271_filter_params {
3170	bool enabled;
3171	int mc_list_length;
3172	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3173};
3174
3175static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3176				       struct netdev_hw_addr_list *mc_list)
3177{
3178	struct wl1271_filter_params *fp;
3179	struct netdev_hw_addr *ha;
3180
3181	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3182	if (!fp) {
3183		wl1271_error("Out of memory setting filters.");
3184		return 0;
3185	}
3186
3187	/* update multicast filtering parameters */
3188	fp->mc_list_length = 0;
3189	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3190		fp->enabled = false;
3191	} else {
3192		fp->enabled = true;
3193		netdev_hw_addr_list_for_each(ha, mc_list) {
3194			memcpy(fp->mc_list[fp->mc_list_length],
3195					ha->addr, ETH_ALEN);
3196			fp->mc_list_length++;
3197		}
3198	}
3199
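	/*
	 * Return the filter list as an opaque cookie; configure_filter()
	 * reads and frees it.
	 */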
3200	return (u64)(unsigned long)fp;
3201}
3202
3203#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3204				  FIF_FCSFAIL | \
3205				  FIF_BCN_PRBRESP_PROMISC | \
3206				  FIF_CONTROL | \
3207				  FIF_OTHER_BSS)
3208
3209static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3210				       unsigned int changed,
3211				       unsigned int *total, u64 multicast)
3212{
3213	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3214	struct wl1271 *wl = hw->priv;
3215	struct wl12xx_vif *wlvif;
3216
3217	int ret;
3218
3219	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3220		     " total %x", changed, *total);
3221
3222	mutex_lock(&wl->mutex);
3223
3224	*total &= WL1271_SUPPORTED_FILTERS;
3225	changed &= WL1271_SUPPORTED_FILTERS;
3226
3227	if (unlikely(wl->state != WLCORE_STATE_ON))
3228		goto out;
3229
3230	ret = pm_runtime_get_sync(wl->dev);
3231	if (ret < 0) {
3232		pm_runtime_put_noidle(wl->dev);
3233		goto out;
3234	}
3235
3236	wl12xx_for_each_wlvif(wl, wlvif) {
3237		if (wlcore_is_p2p_mgmt(wlvif))
3238			continue;
3239
3240		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3241			if (*total & FIF_ALLMULTI)
3242				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3243								   false,
3244								   NULL, 0);
3245			else if (fp)
3246				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3247							fp->enabled,
3248							fp->mc_list,
3249							fp->mc_list_length);
3250			if (ret < 0)
3251				goto out_sleep;
3252		}
3253
3254		/*
3255		 * If the interface is in AP mode and was created with allmulticast,
3256		 * disable the firmware filters so that all multicast packets are
3257		 * passed. This is mandatory for mDNS-based discovery protocols.
3258		 */
3259		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3260			if (*total & FIF_ALLMULTI) {
3261				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3262							false,
3263							NULL, 0);
3264				if (ret < 0)
3265					goto out_sleep;
3266			}
3267		}
3268	}
3269
3270	/*
3271	 * the fw doesn't provide an api to configure the filters. instead,
3272	 * the filter configuration is based on the active roles / ROC
3273	 * state.
3274	 */
3275
3276out_sleep:
3277	pm_runtime_mark_last_busy(wl->dev);
3278	pm_runtime_put_autosuspend(wl->dev);
3279
3280out:
3281	mutex_unlock(&wl->mutex);
3282	kfree(fp);
3283}
3284
3285static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3286				u8 id, u8 key_type, u8 key_size,
3287				const u8 *key, u8 hlid, u32 tx_seq_32,
3288				u16 tx_seq_16, bool is_pairwise)
3289{
3290	struct wl1271_ap_key *ap_key;
3291	int i;
3292
3293	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3294
3295	if (key_size > MAX_KEY_SIZE)
3296		return -EINVAL;
3297
3298	/*
3299	 * Find next free entry in ap_keys. Also check we are not replacing
3300	 * an existing key.
3301	 */
3302	for (i = 0; i < MAX_NUM_KEYS; i++) {
3303		if (wlvif->ap.recorded_keys[i] == NULL)
3304			break;
3305
3306		if (wlvif->ap.recorded_keys[i]->id == id) {
3307			wl1271_warning("trying to record key replacement");
3308			return -EINVAL;
3309		}
3310	}
3311
3312	if (i == MAX_NUM_KEYS)
3313		return -EBUSY;
3314
3315	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3316	if (!ap_key)
3317		return -ENOMEM;
3318
3319	ap_key->id = id;
3320	ap_key->key_type = key_type;
3321	ap_key->key_size = key_size;
3322	memcpy(ap_key->key, key, key_size);
3323	ap_key->hlid = hlid;
3324	ap_key->tx_seq_32 = tx_seq_32;
3325	ap_key->tx_seq_16 = tx_seq_16;
3326	ap_key->is_pairwise = is_pairwise;
3327
3328	wlvif->ap.recorded_keys[i] = ap_key;
3329	return 0;
3330}
3331
3332static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3333{
3334	int i;
3335
3336	for (i = 0; i < MAX_NUM_KEYS; i++) {
3337		kfree(wlvif->ap.recorded_keys[i]);
3338		wlvif->ap.recorded_keys[i] = NULL;
3339	}
3340}
3341
3342static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3343{
3344	int i, ret = 0;
3345	struct wl1271_ap_key *key;
3346	bool wep_key_added = false;
3347
3348	for (i = 0; i < MAX_NUM_KEYS; i++) {
3349		u8 hlid;
3350		if (wlvif->ap.recorded_keys[i] == NULL)
3351			break;
3352
3353		key = wlvif->ap.recorded_keys[i];
3354		hlid = key->hlid;
3355		if (hlid == WL12XX_INVALID_LINK_ID)
3356			hlid = wlvif->ap.bcast_hlid;
3357
3358		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3359					    key->id, key->key_type,
3360					    key->key_size, key->key,
3361					    hlid, key->tx_seq_32,
3362					    key->tx_seq_16, key->is_pairwise);
3363		if (ret < 0)
3364			goto out;
3365
3366		if (key->key_type == KEY_WEP)
3367			wep_key_added = true;
3368	}
3369
3370	if (wep_key_added) {
3371		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3372						     wlvif->ap.bcast_hlid);
3373		if (ret < 0)
3374			goto out;
3375	}
3376
3377out:
3378	wl1271_free_ap_keys(wl, wlvif);
3379	return ret;
3380}
3381
3382static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3383		       u16 action, u8 id, u8 key_type,
3384		       u8 key_size, const u8 *key, u32 tx_seq_32,
3385		       u16 tx_seq_16, struct ieee80211_sta *sta,
3386		       bool is_pairwise)
3387{
3388	int ret;
3389	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3390
3391	if (is_ap) {
3392		struct wl1271_station *wl_sta;
3393		u8 hlid;
3394
3395		if (sta) {
3396			wl_sta = (struct wl1271_station *)sta->drv_priv;
3397			hlid = wl_sta->hlid;
3398		} else {
3399			hlid = wlvif->ap.bcast_hlid;
3400		}
3401
3402		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3403			/*
3404			 * We do not support removing keys after AP shutdown.
3405			 * Pretend we do to make mac80211 happy.
3406			 */
3407			if (action != KEY_ADD_OR_REPLACE)
3408				return 0;
3409
3410			ret = wl1271_record_ap_key(wl, wlvif, id,
3411					     key_type, key_size,
3412					     key, hlid, tx_seq_32,
3413					     tx_seq_16, is_pairwise);
3414		} else {
3415			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3416					     id, key_type, key_size,
3417					     key, hlid, tx_seq_32,
3418					     tx_seq_16, is_pairwise);
3419		}
3420
3421		if (ret < 0)
3422			return ret;
3423	} else {
3424		const u8 *addr;
3425		static const u8 bcast_addr[ETH_ALEN] = {
3426			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3427		};
3428
3429		addr = sta ? sta->addr : bcast_addr;
3430
3431		if (is_zero_ether_addr(addr)) {
3432			/* We don't support TX-only encryption */
3433			return -EOPNOTSUPP;
3434		}
3435
3436		/* The wl1271 does not allow removing unicast keys - they
3437		   will be cleared automatically on the next CMD_JOIN. Ignore the
3438		   request silently, as we don't want mac80211 to emit
3439		   an error message. */
3440		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3441			return 0;
3442
3443		/* don't remove key if hlid was already deleted */
3444		if (action == KEY_REMOVE &&
3445		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3446			return 0;
3447
3448		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3449					     id, key_type, key_size,
3450					     key, addr, tx_seq_32,
3451					     tx_seq_16);
3452		if (ret < 0)
3453			return ret;
3454
3455	}
3456
3457	return 0;
3458}
3459
3460static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3461			     struct ieee80211_vif *vif,
3462			     struct ieee80211_sta *sta,
3463			     struct ieee80211_key_conf *key_conf)
3464{
3465	struct wl1271 *wl = hw->priv;
3466	int ret;
3467	bool might_change_spare =
3468		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3469		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3470
3471	if (might_change_spare) {
3472		/*
3473		 * stop the queues and flush to ensure the next packets are
3474		 * in sync with FW spare block accounting
3475		 */
3476		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3477		wl1271_tx_flush(wl);
3478	}
3479
3480	mutex_lock(&wl->mutex);
3481
3482	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3483		ret = -EAGAIN;
3484		goto out_wake_queues;
3485	}
3486
3487	ret = pm_runtime_get_sync(wl->dev);
3488	if (ret < 0) {
3489		pm_runtime_put_noidle(wl->dev);
3490		goto out_wake_queues;
3491	}
3492
3493	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3494
3495	pm_runtime_mark_last_busy(wl->dev);
3496	pm_runtime_put_autosuspend(wl->dev);
3497
3498out_wake_queues:
3499	if (might_change_spare)
3500		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3501
3502	mutex_unlock(&wl->mutex);
3503
3504	return ret;
3505}
3506
3507int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3508		   struct ieee80211_vif *vif,
3509		   struct ieee80211_sta *sta,
3510		   struct ieee80211_key_conf *key_conf)
3511{
3512	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3513	int ret;
3514	u32 tx_seq_32 = 0;
3515	u16 tx_seq_16 = 0;
3516	u8 key_type;
3517	u8 hlid;
3518	bool is_pairwise;
3519
3520	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3521
3522	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3523	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3524		     key_conf->cipher, key_conf->keyidx,
3525		     key_conf->keylen, key_conf->flags);
3526	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3527
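	/*
	 * Resolve the link id the key applies to: a per-station or the
	 * broadcast link in AP mode, the station's own link otherwise.
	 */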
3528	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3529		if (sta) {
3530			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3531			hlid = wl_sta->hlid;
3532		} else {
3533			hlid = wlvif->ap.bcast_hlid;
3534		}
3535	else
3536		hlid = wlvif->sta.hlid;
3537
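	/*
	 * Seed the key's TX sequence counter from the packets already freed
	 * on that link, split into the high 32-bit and low 16-bit parts used
	 * by the key commands.
	 */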
3538	if (hlid != WL12XX_INVALID_LINK_ID) {
3539		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3540		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3541		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3542	}
3543
3544	switch (key_conf->cipher) {
3545	case WLAN_CIPHER_SUITE_WEP40:
3546	case WLAN_CIPHER_SUITE_WEP104:
3547		key_type = KEY_WEP;
3548
3549		key_conf->hw_key_idx = key_conf->keyidx;
3550		break;
3551	case WLAN_CIPHER_SUITE_TKIP:
3552		key_type = KEY_TKIP;
3553		key_conf->hw_key_idx = key_conf->keyidx;
3554		break;
3555	case WLAN_CIPHER_SUITE_CCMP:
3556		key_type = KEY_AES;
3557		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3558		break;
3559	case WL1271_CIPHER_SUITE_GEM:
3560		key_type = KEY_GEM;
3561		break;
3562	default:
3563		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3564
3565		return -EOPNOTSUPP;
3566	}
3567
3568	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3569
3570	switch (cmd) {
3571	case SET_KEY:
3572		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3573				 key_conf->keyidx, key_type,
3574				 key_conf->keylen, key_conf->key,
3575				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3576		if (ret < 0) {
3577			wl1271_error("Could not add or replace key");
3578			return ret;
3579		}
3580
3581		/*
3582		 * reconfiguring arp response if the unicast (or common)
3583		 * encryption key type was changed
3584		 */
3585		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3586		    (sta || key_type == KEY_WEP) &&
3587		    wlvif->encryption_type != key_type) {
3588			wlvif->encryption_type = key_type;
3589			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3590			if (ret < 0) {
3591				wl1271_warning("build arp rsp failed: %d", ret);
3592				return ret;
3593			}
3594		}
3595		break;
3596
3597	case DISABLE_KEY:
3598		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3599				     key_conf->keyidx, key_type,
3600				     key_conf->keylen, key_conf->key,
3601				     0, 0, sta, is_pairwise);
3602		if (ret < 0) {
3603			wl1271_error("Could not remove key");
3604			return ret;
3605		}
3606		break;
3607
3608	default:
3609		wl1271_error("Unsupported key cmd 0x%x", cmd);
3610		return -EOPNOTSUPP;
3611	}
3612
3613	return ret;
3614}
3615EXPORT_SYMBOL_GPL(wlcore_set_key);
3616
3617static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3618					  struct ieee80211_vif *vif,
3619					  int key_idx)
3620{
3621	struct wl1271 *wl = hw->priv;
3622	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3623	int ret;
3624
3625	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3626		     key_idx);
3627
3628	/* we don't handle unsetting of default key */
3629	if (key_idx == -1)
3630		return;
3631
3632	mutex_lock(&wl->mutex);
3633
3634	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3635		ret = -EAGAIN;
3636		goto out_unlock;
3637	}
3638
3639	ret = pm_runtime_get_sync(wl->dev);
3640	if (ret < 0) {
3641		pm_runtime_put_noidle(wl->dev);
3642		goto out_unlock;
3643	}
3644
3645	wlvif->default_key = key_idx;
3646
3647	/* the default WEP key needs to be configured at least once */
3648	if (wlvif->encryption_type == KEY_WEP) {
3649		ret = wl12xx_cmd_set_default_wep_key(wl,
3650				key_idx,
3651				wlvif->sta.hlid);
3652		if (ret < 0)
3653			goto out_sleep;
3654	}
3655
3656out_sleep:
3657	pm_runtime_mark_last_busy(wl->dev);
3658	pm_runtime_put_autosuspend(wl->dev);
3659
3660out_unlock:
3661	mutex_unlock(&wl->mutex);
3662}
3663
3664void wlcore_regdomain_config(struct wl1271 *wl)
3665{
3666	int ret;
3667
3668	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3669		return;
3670
3671	mutex_lock(&wl->mutex);
3672
3673	if (unlikely(wl->state != WLCORE_STATE_ON))
3674		goto out;
3675
3676	ret = pm_runtime_get_sync(wl->dev);
3677	if (ret < 0) {
3678		pm_runtime_put_autosuspend(wl->dev);
3679		goto out;
3680	}
3681
3682	ret = wlcore_cmd_regdomain_config_locked(wl);
3683	if (ret < 0) {
3684		wl12xx_queue_recovery_work(wl);
3685		goto out;
3686	}
3687
3688	pm_runtime_mark_last_busy(wl->dev);
3689	pm_runtime_put_autosuspend(wl->dev);
3690out:
3691	mutex_unlock(&wl->mutex);
3692}
3693
3694static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3695			     struct ieee80211_vif *vif,
3696			     struct ieee80211_scan_request *hw_req)
3697{
3698	struct cfg80211_scan_request *req = &hw_req->req;
3699	struct wl1271 *wl = hw->priv;
3700	int ret;
3701	u8 *ssid = NULL;
3702	size_t len = 0;
3703
3704	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3705
3706	if (req->n_ssids) {
3707		ssid = req->ssids[0].ssid;
3708		len = req->ssids[0].ssid_len;
3709	}
3710
3711	mutex_lock(&wl->mutex);
3712
3713	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3714		/*
3715		 * We cannot return -EBUSY here because cfg80211 will expect
3716		 * a call to ieee80211_scan_completed if we do - in this case
3717		 * there won't be any call.
3718		 */
3719		ret = -EAGAIN;
3720		goto out;
3721	}
3722
3723	ret = pm_runtime_get_sync(wl->dev);
3724	if (ret < 0) {
3725		pm_runtime_put_noidle(wl->dev);
3726		goto out;
3727	}
3728
3729	/* fail if there is any role in ROC */
3730	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3731		/* don't allow scanning right now */
3732		ret = -EBUSY;
3733		goto out_sleep;
3734	}
3735
3736	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3737out_sleep:
3738	pm_runtime_mark_last_busy(wl->dev);
3739	pm_runtime_put_autosuspend(wl->dev);
3740out:
3741	mutex_unlock(&wl->mutex);
3742
3743	return ret;
3744}
3745
3746static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3747				     struct ieee80211_vif *vif)
3748{
3749	struct wl1271 *wl = hw->priv;
3750	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3751	struct cfg80211_scan_info info = {
3752		.aborted = true,
3753	};
3754	int ret;
3755
3756	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3757
3758	mutex_lock(&wl->mutex);
3759
3760	if (unlikely(wl->state != WLCORE_STATE_ON))
3761		goto out;
3762
3763	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3764		goto out;
3765
3766	ret = pm_runtime_get_sync(wl->dev);
3767	if (ret < 0) {
3768		pm_runtime_put_noidle(wl->dev);
3769		goto out;
3770	}
3771
3772	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3773		ret = wl->ops->scan_stop(wl, wlvif);
3774		if (ret < 0)
3775			goto out_sleep;
3776	}
3777
3778	/*
3779	 * Rearm the tx watchdog just before idling scan. This
3780	 * prevents just-finished scans from triggering the watchdog.
3781	 */
3782	wl12xx_rearm_tx_watchdog_locked(wl);
3783
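	/*
	 * Reset the driver's scan bookkeeping and report the aborted scan
	 * back to mac80211.
	 */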
3784	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3785	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3786	wl->scan_wlvif = NULL;
3787	wl->scan.req = NULL;
3788	ieee80211_scan_completed(wl->hw, &info);
3789
3790out_sleep:
3791	pm_runtime_mark_last_busy(wl->dev);
3792	pm_runtime_put_autosuspend(wl->dev);
3793out:
3794	mutex_unlock(&wl->mutex);
3795
3796	cancel_delayed_work_sync(&wl->scan_complete_work);
3797}
3798
3799static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3800				      struct ieee80211_vif *vif,
3801				      struct cfg80211_sched_scan_request *req,
3802				      struct ieee80211_scan_ies *ies)
3803{
3804	struct wl1271 *wl = hw->priv;
3805	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3806	int ret;
3807
3808	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3809
3810	mutex_lock(&wl->mutex);
3811
3812	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3813		ret = -EAGAIN;
3814		goto out;
3815	}
3816
3817	ret = pm_runtime_get_sync(wl->dev);
3818	if (ret < 0) {
3819		pm_runtime_put_noidle(wl->dev);
3820		goto out;
3821	}
3822
3823	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3824	if (ret < 0)
3825		goto out_sleep;
3826
3827	wl->sched_vif = wlvif;
3828
3829out_sleep:
3830	pm_runtime_mark_last_busy(wl->dev);
3831	pm_runtime_put_autosuspend(wl->dev);
3832out:
3833	mutex_unlock(&wl->mutex);
3834	return ret;
3835}
3836
3837static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3838				     struct ieee80211_vif *vif)
3839{
3840	struct wl1271 *wl = hw->priv;
3841	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3842	int ret;
3843
3844	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3845
3846	mutex_lock(&wl->mutex);
3847
3848	if (unlikely(wl->state != WLCORE_STATE_ON))
3849		goto out;
3850
3851	ret = pm_runtime_get_sync(wl->dev);
3852	if (ret < 0) {
3853		pm_runtime_put_noidle(wl->dev);
3854		goto out;
3855	}
3856
3857	wl->ops->sched_scan_stop(wl, wlvif);
3858
3859	pm_runtime_mark_last_busy(wl->dev);
3860	pm_runtime_put_autosuspend(wl->dev);
3861out:
3862	mutex_unlock(&wl->mutex);
3863
3864	return 0;
3865}
3866
3867static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3868{
3869	struct wl1271 *wl = hw->priv;
3870	int ret = 0;
3871
3872	mutex_lock(&wl->mutex);
3873
3874	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3875		ret = -EAGAIN;
3876		goto out;
3877	}
3878
3879	ret = pm_runtime_get_sync(wl->dev);
3880	if (ret < 0) {
3881		pm_runtime_put_noidle(wl->dev);
3882		goto out;
3883	}
3884
3885	ret = wl1271_acx_frag_threshold(wl, value);
3886	if (ret < 0)
3887		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3888
3889	pm_runtime_mark_last_busy(wl->dev);
3890	pm_runtime_put_autosuspend(wl->dev);
3891
3892out:
3893	mutex_unlock(&wl->mutex);
3894
3895	return ret;
3896}
3897
3898static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3899{
3900	struct wl1271 *wl = hw->priv;
3901	struct wl12xx_vif *wlvif;
3902	int ret = 0;
3903
3904	mutex_lock(&wl->mutex);
3905
3906	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3907		ret = -EAGAIN;
3908		goto out;
3909	}
3910
3911	ret = pm_runtime_get_sync(wl->dev);
3912	if (ret < 0) {
3913		pm_runtime_put_noidle(wl->dev);
3914		goto out;
3915	}
3916
3917	wl12xx_for_each_wlvif(wl, wlvif) {
3918		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3919		if (ret < 0)
3920			wl1271_warning("set rts threshold failed: %d", ret);
3921	}
3922	pm_runtime_mark_last_busy(wl->dev);
3923	pm_runtime_put_autosuspend(wl->dev);
3924
3925out:
3926	mutex_unlock(&wl->mutex);
3927
3928	return ret;
3929}
3930
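/*
 * Strip a single IE from a template skb by moving the remaining data over it
 * and trimming the skb length.
 */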
3931static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3932{
3933	int len;
3934	const u8 *next, *end = skb->data + skb->len;
3935	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3936					skb->len - ieoffset);
3937	if (!ie)
3938		return;
3939	len = ie[1] + 2;
3940	next = ie + len;
3941	memmove(ie, next, end - next);
3942	skb_trim(skb, skb->len - len);
3943}
3944
3945static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3946					    unsigned int oui, u8 oui_type,
3947					    int ieoffset)
3948{
3949	int len;
3950	const u8 *next, *end = skb->data + skb->len;
3951	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3952					       skb->data + ieoffset,
3953					       skb->len - ieoffset);
3954	if (!ie)
3955		return;
3956	len = ie[1] + 2;
3957	next = ie + len;
3958	memmove(ie, next, end - next);
3959	skb_trim(skb, skb->len - len);
3960}
3961
3962static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3963					 struct ieee80211_vif *vif)
3964{
3965	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3966	struct sk_buff *skb;
3967	int ret;
3968
3969	skb = ieee80211_proberesp_get(wl->hw, vif);
3970	if (!skb)
3971		return -EOPNOTSUPP;
3972
3973	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3974				      CMD_TEMPL_AP_PROBE_RESPONSE,
3975				      skb->data,
3976				      skb->len, 0,
3977				      rates);
3978	dev_kfree_skb(skb);
3979
3980	if (ret < 0)
3981		goto out;
3982
3983	wl1271_debug(DEBUG_AP, "probe response updated");
3984	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3985
3986out:
3987	return ret;
3988}
3989
3990static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3991					     struct ieee80211_vif *vif,
3992					     u8 *probe_rsp_data,
3993					     size_t probe_rsp_len,
3994					     u32 rates)
3995{
3996	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3997	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3998	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3999	int ssid_ie_offset, ie_offset, templ_len;
4000	const u8 *ptr;
4001
4002	/* no need to change probe response if the SSID is set correctly */
4003	if (wlvif->ssid_len > 0)
4004		return wl1271_cmd_template_set(wl, wlvif->role_id,
4005					       CMD_TEMPL_AP_PROBE_RESPONSE,
4006					       probe_rsp_data,
4007					       probe_rsp_len, 0,
4008					       rates);
4009
4010	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4011		wl1271_error("probe_rsp template too big");
4012		return -EINVAL;
4013	}
4014
4015	/* start searching from IE offset */
4016	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4017
4018	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4019			       probe_rsp_len - ie_offset);
4020	if (!ptr) {
4021		wl1271_error("No SSID in beacon!");
4022		return -EINVAL;
4023	}
4024
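	/*
	 * Copy the template up to the SSID IE, substitute the SSID from
	 * bss_conf, then append the IEs following the original SSID IE.
	 */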
4025	ssid_ie_offset = ptr - probe_rsp_data;
4026	ptr += (ptr[1] + 2);
4027
4028	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4029
4030	/* insert SSID from bss_conf */
4031	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4032	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4033	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4034	       bss_conf->ssid, bss_conf->ssid_len);
4035	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4036
4037	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4038	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4039	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4040
4041	return wl1271_cmd_template_set(wl, wlvif->role_id,
4042				       CMD_TEMPL_AP_PROBE_RESPONSE,
4043				       probe_rsp_templ,
4044				       templ_len, 0,
4045				       rates);
4046}
4047
4048static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4049				       struct ieee80211_vif *vif,
4050				       struct ieee80211_bss_conf *bss_conf,
4051				       u32 changed)
4052{
4053	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4054	int ret = 0;
4055
4056	if (changed & BSS_CHANGED_ERP_SLOT) {
4057		if (bss_conf->use_short_slot)
4058			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4059		else
4060			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4061		if (ret < 0) {
4062			wl1271_warning("Set slot time failed %d", ret);
4063			goto out;
4064		}
4065	}
4066
4067	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4068		if (bss_conf->use_short_preamble)
4069			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4070		else
4071			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4072	}
4073
4074	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4075		if (bss_conf->use_cts_prot)
4076			ret = wl1271_acx_cts_protect(wl, wlvif,
4077						     CTSPROTECT_ENABLE);
4078		else
4079			ret = wl1271_acx_cts_protect(wl, wlvif,
4080						     CTSPROTECT_DISABLE);
4081		if (ret < 0) {
4082			wl1271_warning("Set ctsprotect failed %d", ret);
4083			goto out;
4084		}
4085	}
4086
4087out:
4088	return ret;
4089}
4090
4091static int wlcore_set_beacon_template(struct wl1271 *wl,
4092				      struct ieee80211_vif *vif,
4093				      bool is_ap)
4094{
4095	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4096	struct ieee80211_hdr *hdr;
4097	u32 min_rate;
4098	int ret;
4099	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4100	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4101	u16 tmpl_id;
4102
4103	if (!beacon) {
4104		ret = -EINVAL;
4105		goto out;
4106	}
4107
4108	wl1271_debug(DEBUG_MASTER, "beacon updated");
4109
4110	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4111	if (ret < 0) {
4112		dev_kfree_skb(beacon);
4113		goto out;
4114	}
4115	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4116	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4117		CMD_TEMPL_BEACON;
4118	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4119				      beacon->data,
4120				      beacon->len, 0,
4121				      min_rate);
4122	if (ret < 0) {
4123		dev_kfree_skb(beacon);
4124		goto out;
4125	}
4126
4127	wlvif->wmm_enabled =
4128		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4129					WLAN_OUI_TYPE_MICROSOFT_WMM,
4130					beacon->data + ieoffset,
4131					beacon->len - ieoffset);
4132
4133	/*
4134	 * In case a probe-resp template has already been set explicitly
4135	 * by userspace, don't derive one from the beacon data.
4136	 */
4137	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4138		goto end_bcn;
4139
4140	/* remove TIM ie from probe response */
4141	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4142
4143	/*
4144	 * remove p2p ie from probe response.
4145	 * the fw responds to probe requests that don't include
4146	 * the p2p ie. probe requests with the p2p ie will be passed on,
4147	 * and will be answered by the supplicant (the spec
4148	 * forbids including the p2p ie when responding to probe
4149	 * requests that didn't include it).
4150	 */
4151	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4152				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4153
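	/*
	 * Reuse the stripped beacon as a probe response template by
	 * rewriting the frame control to PROBE_RESP.
	 */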
4154	hdr = (struct ieee80211_hdr *) beacon->data;
4155	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4156					 IEEE80211_STYPE_PROBE_RESP);
4157	if (is_ap)
4158		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4159							   beacon->data,
4160							   beacon->len,
4161							   min_rate);
4162	else
4163		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4164					      CMD_TEMPL_PROBE_RESPONSE,
4165					      beacon->data,
4166					      beacon->len, 0,
4167					      min_rate);
4168end_bcn:
4169	dev_kfree_skb(beacon);
4170	if (ret < 0)
4171		goto out;
4172
4173out:
4174	return ret;
4175}
4176
4177static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4178					  struct ieee80211_vif *vif,
4179					  struct ieee80211_bss_conf *bss_conf,
4180					  u32 changed)
4181{
4182	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4183	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4184	int ret = 0;
4185
4186	if (changed & BSS_CHANGED_BEACON_INT) {
4187		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4188			bss_conf->beacon_int);
4189
4190		wlvif->beacon_int = bss_conf->beacon_int;
4191	}
4192
4193	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4194		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4195
4196		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4197	}
4198
4199	if (changed & BSS_CHANGED_BEACON) {
4200		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4201		if (ret < 0)
4202			goto out;
4203
4204		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4205				       &wlvif->flags)) {
4206			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4207			if (ret < 0)
4208				goto out;
4209		}
4210	}
4211out:
4212	if (ret != 0)
4213		wl1271_error("beacon info change failed: %d", ret);
4214	return ret;
4215}
4216
4217/* AP mode changes */
4218static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4219				       struct ieee80211_vif *vif,
4220				       struct ieee80211_bss_conf *bss_conf,
4221				       u32 changed)
4222{
4223	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4224	int ret = 0;
4225
4226	if (changed & BSS_CHANGED_BASIC_RATES) {
4227		u32 rates = bss_conf->basic_rates;
4228
4229		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4230								 wlvif->band);
4231		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4232							wlvif->basic_rate_set);
4233
4234		ret = wl1271_init_ap_rates(wl, wlvif);
4235		if (ret < 0) {
4236			wl1271_error("AP rate policy change failed %d", ret);
4237			goto out;
4238		}
4239
4240		ret = wl1271_ap_init_templates(wl, vif);
4241		if (ret < 0)
4242			goto out;
4243
4244		/* No need to set probe resp template for mesh */
4245		if (!ieee80211_vif_is_mesh(vif)) {
4246			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4247							    wlvif->basic_rate,
4248							    vif);
4249			if (ret < 0)
4250				goto out;
4251		}
4252
4253		ret = wlcore_set_beacon_template(wl, vif, true);
4254		if (ret < 0)
4255			goto out;
4256	}
4257
4258	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4259	if (ret < 0)
4260		goto out;
4261
4262	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4263		if (bss_conf->enable_beacon) {
4264			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4265				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4266				if (ret < 0)
4267					goto out;
4268
4269				ret = wl1271_ap_init_hwenc(wl, wlvif);
4270				if (ret < 0)
4271					goto out;
4272
4273				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4274				wl1271_debug(DEBUG_AP, "started AP");
4275			}
4276		} else {
4277			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4278				/*
4279				 * The AP might be in ROC if we have just
4280				 * sent an auth reply. Handle it.
4281				 */
4282				if (test_bit(wlvif->role_id, wl->roc_map))
4283					wl12xx_croc(wl, wlvif->role_id);
4284
4285				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4286				if (ret < 0)
4287					goto out;
4288
4289				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4290				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4291					  &wlvif->flags);
4292				wl1271_debug(DEBUG_AP, "stopped AP");
4293			}
4294		}
4295	}
4296
4297	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4298	if (ret < 0)
4299		goto out;
4300
4301	/* Handle HT information change */
4302	if ((changed & BSS_CHANGED_HT) &&
4303	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4304		ret = wl1271_acx_set_ht_information(wl, wlvif,
4305					bss_conf->ht_operation_mode);
4306		if (ret < 0) {
4307			wl1271_warning("Set ht information failed %d", ret);
4308			goto out;
4309		}
4310	}
4311
4312out:
4313	return;
4314}
4315
4316static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4317			    struct ieee80211_bss_conf *bss_conf,
4318			    u32 sta_rate_set)
4319{
4320	u32 rates;
4321	int ret;
4322
4323	wl1271_debug(DEBUG_MAC80211,
4324	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4325	     bss_conf->bssid, bss_conf->aid,
4326	     bss_conf->beacon_int,
4327	     bss_conf->basic_rates, sta_rate_set);
4328
4329	wlvif->beacon_int = bss_conf->beacon_int;
4330	rates = bss_conf->basic_rates;
4331	wlvif->basic_rate_set =
4332		wl1271_tx_enabled_rates_get(wl, rates,
4333					    wlvif->band);
4334	wlvif->basic_rate =
4335		wl1271_tx_min_rate_get(wl,
4336				       wlvif->basic_rate_set);
4337
4338	if (sta_rate_set)
4339		wlvif->rate_set =
4340			wl1271_tx_enabled_rates_get(wl,
4341						sta_rate_set,
4342						wlvif->band);
4343
4344	/* we only support sched_scan while not connected */
4345	if (wl->sched_vif == wlvif)
4346		wl->ops->sched_scan_stop(wl, wlvif);
4347
4348	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4349	if (ret < 0)
4350		return ret;
4351
4352	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4353	if (ret < 0)
4354		return ret;
4355
4356	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4357	if (ret < 0)
4358		return ret;
4359
4360	wlcore_set_ssid(wl, wlvif);
4361
4362	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4363
4364	return 0;
4365}
4366
4367static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4368{
4369	int ret;
4370
4371	/* revert back to minimum rates for the current band */
4372	wl1271_set_band_rate(wl, wlvif);
4373	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4374
4375	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4376	if (ret < 0)
4377		return ret;
4378
4379	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4380	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4381		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4382		if (ret < 0)
4383			return ret;
4384	}
4385
4386	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4387	return 0;
4388}
4389/* STA/IBSS mode changes */
4390static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4391					struct ieee80211_vif *vif,
4392					struct ieee80211_bss_conf *bss_conf,
4393					u32 changed)
4394{
4395	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4396	bool do_join = false;
4397	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4398	bool ibss_joined = false;
4399	u32 sta_rate_set = 0;
4400	int ret;
4401	struct ieee80211_sta *sta;
4402	bool sta_exists = false;
4403	struct ieee80211_sta_ht_cap sta_ht_cap;
4404
4405	if (is_ibss) {
4406		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4407						     changed);
4408		if (ret < 0)
4409			goto out;
4410	}
4411
4412	if (changed & BSS_CHANGED_IBSS) {
4413		if (bss_conf->ibss_joined) {
4414			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4415			ibss_joined = true;
4416		} else {
4417			wlcore_unset_assoc(wl, wlvif);
4418			wl12xx_cmd_role_stop_sta(wl, wlvif);
4419		}
4420	}
4421
4422	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4423		do_join = true;
4424
4425	/* Need to update the SSID (for filtering etc) */
4426	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4427		do_join = true;
4428
4429	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4430		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4431			     bss_conf->enable_beacon ? "enabled" : "disabled");
4432
4433		do_join = true;
4434	}
4435
4436	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4437		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4438
4439	if (changed & BSS_CHANGED_CQM) {
4440		bool enable = false;
4441		if (bss_conf->cqm_rssi_thold)
4442			enable = true;
4443		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4444						  bss_conf->cqm_rssi_thold,
4445						  bss_conf->cqm_rssi_hyst);
4446		if (ret < 0)
4447			goto out;
4448		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4449	}
4450
4451	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4452		       BSS_CHANGED_ASSOC)) {
4453		rcu_read_lock();
4454		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4455		if (sta) {
4456			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4457
4458			/* save the supp_rates of the ap */
4459			sta_rate_set = sta->supp_rates[wlvif->band];
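			/*
			 * Fold the AP's HT MCS rx mask into the upper bits
			 * of the rate set.
			 */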
4460			if (sta->ht_cap.ht_supported)
4461				sta_rate_set |=
4462					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4463					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4464			sta_ht_cap = sta->ht_cap;
4465			sta_exists = true;
4466		}
4467
4468		rcu_read_unlock();
4469	}
4470
4471	if (changed & BSS_CHANGED_BSSID) {
4472		if (!is_zero_ether_addr(bss_conf->bssid)) {
4473			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4474					       sta_rate_set);
4475			if (ret < 0)
4476				goto out;
4477
4478			/* Need to update the BSSID (for filtering etc) */
4479			do_join = true;
4480		} else {
4481			ret = wlcore_clear_bssid(wl, wlvif);
4482			if (ret < 0)
4483				goto out;
4484		}
4485	}
4486
4487	if (changed & BSS_CHANGED_IBSS) {
4488		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4489			     bss_conf->ibss_joined);
4490
4491		if (bss_conf->ibss_joined) {
4492			u32 rates = bss_conf->basic_rates;
4493			wlvif->basic_rate_set =
4494				wl1271_tx_enabled_rates_get(wl, rates,
4495							    wlvif->band);
4496			wlvif->basic_rate =
4497				wl1271_tx_min_rate_get(wl,
4498						       wlvif->basic_rate_set);
4499
4500			/* by default, use 11b + OFDM rates */
4501			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4502			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4503			if (ret < 0)
4504				goto out;
4505		}
4506	}
4507
4508	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4509		/* enable beacon filtering */
4510		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4511		if (ret < 0)
4512			goto out;
4513	}
4514
4515	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4516	if (ret < 0)
4517		goto out;
4518
4519	if (do_join) {
4520		ret = wlcore_join(wl, wlvif);
4521		if (ret < 0) {
4522			wl1271_warning("cmd join failed %d", ret);
4523			goto out;
4524		}
4525	}
4526
4527	if (changed & BSS_CHANGED_ASSOC) {
4528		if (bss_conf->assoc) {
4529			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4530					       sta_rate_set);
4531			if (ret < 0)
4532				goto out;
4533
4534			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4535				wl12xx_set_authorized(wl, wlvif);
4536		} else {
4537			wlcore_unset_assoc(wl, wlvif);
4538		}
4539	}
4540
4541	if (changed & BSS_CHANGED_PS) {
4542		if ((bss_conf->ps) &&
4543		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4544		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4545			int ps_mode;
4546			char *ps_mode_str;
4547
4548			if (wl->conf.conn.forced_ps) {
4549				ps_mode = STATION_POWER_SAVE_MODE;
4550				ps_mode_str = "forced";
4551			} else {
4552				ps_mode = STATION_AUTO_PS_MODE;
4553				ps_mode_str = "auto";
4554			}
4555
4556			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4557
4558			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4559			if (ret < 0)
4560				wl1271_warning("enter %s ps failed %d",
4561					       ps_mode_str, ret);
4562		} else if (!bss_conf->ps &&
4563			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4564			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4565
4566			ret = wl1271_ps_set_mode(wl, wlvif,
4567						 STATION_ACTIVE_MODE);
4568			if (ret < 0)
4569				wl1271_warning("exit auto ps failed %d", ret);
4570		}
4571	}
4572
4573	/* Handle new association with HT. Do this after join. */
4574	if (sta_exists) {
4575		bool enabled =
4576			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4577
4578		ret = wlcore_hw_set_peer_cap(wl,
4579					     &sta_ht_cap,
4580					     enabled,
4581					     wlvif->rate_set,
4582					     wlvif->sta.hlid);
4583		if (ret < 0) {
4584			wl1271_warning("Set ht cap failed %d", ret);
4585			goto out;
4586
4587		}
4588
4589		if (enabled) {
4590			ret = wl1271_acx_set_ht_information(wl, wlvif,
4591						bss_conf->ht_operation_mode);
4592			if (ret < 0) {
4593				wl1271_warning("Set ht information failed %d",
4594					       ret);
4595				goto out;
4596			}
4597		}
4598	}
4599
4600	/* Handle arp filtering. Done after join. */
4601	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4602	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4603		__be32 addr = bss_conf->arp_addr_list[0];
4604		wlvif->sta.qos = bss_conf->qos;
4605		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4606
4607		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4608			wlvif->ip_addr = addr;
4609			/*
4610			 * The template should have been configured only upon
4611			 * association. However, it seems that the correct IP
4612			 * isn't being set (when sending), so we have to
4613			 * reconfigure the template upon every IP change.
4614			 */
4615			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4616			if (ret < 0) {
4617				wl1271_warning("build arp rsp failed: %d", ret);
4618				goto out;
4619			}
4620
4621			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4622				(ACX_ARP_FILTER_ARP_FILTERING |
4623				 ACX_ARP_FILTER_AUTO_ARP),
4624				addr);
4625		} else {
4626			wlvif->ip_addr = 0;
4627			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4628		}
4629
4630		if (ret < 0)
4631			goto out;
4632	}
4633
4634out:
4635	return;
4636}
4637
4638static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4639				       struct ieee80211_vif *vif,
4640				       struct ieee80211_bss_conf *bss_conf,
4641				       u32 changed)
4642{
4643	struct wl1271 *wl = hw->priv;
4644	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4645	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4646	int ret;
4647
4648	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4649		     wlvif->role_id, (int)changed);
4650
4651	/*
4652	 * make sure to cancel pending disconnections if our association
4653	 * state changed
4654	 */
4655	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4656		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4657
4658	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4659	    !bss_conf->enable_beacon)
4660		wl1271_tx_flush(wl);
4661
4662	mutex_lock(&wl->mutex);
4663
4664	if (unlikely(wl->state != WLCORE_STATE_ON))
4665		goto out;
4666
4667	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4668		goto out;
4669
4670	ret = pm_runtime_get_sync(wl->dev);
4671	if (ret < 0) {
4672		pm_runtime_put_noidle(wl->dev);
4673		goto out;
4674	}
4675
4676	if ((changed & BSS_CHANGED_TXPOWER) &&
4677	    bss_conf->txpower != wlvif->power_level) {
4678
4679		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4680		if (ret < 0)
4681			goto out;
4682
4683		wlvif->power_level = bss_conf->txpower;
4684	}
4685
4686	if (is_ap)
4687		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4688	else
4689		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4690
4691	pm_runtime_mark_last_busy(wl->dev);
4692	pm_runtime_put_autosuspend(wl->dev);
4693
4694out:
4695	mutex_unlock(&wl->mutex);
4696}
4697
4698static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4699				 struct ieee80211_chanctx_conf *ctx)
4700{
4701	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4702		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4703		     cfg80211_get_chandef_type(&ctx->def));
4704	return 0;
4705}
4706
4707static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4708				     struct ieee80211_chanctx_conf *ctx)
4709{
4710	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4711		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4712		     cfg80211_get_chandef_type(&ctx->def));
4713}
4714
4715static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4716				     struct ieee80211_chanctx_conf *ctx,
4717				     u32 changed)
4718{
4719	struct wl1271 *wl = hw->priv;
4720	struct wl12xx_vif *wlvif;
4721	int ret;
4722	int channel = ieee80211_frequency_to_channel(
4723		ctx->def.chan->center_freq);
4724
4725	wl1271_debug(DEBUG_MAC80211,
4726		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4727		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4728
4729	mutex_lock(&wl->mutex);
4730
4731	ret = pm_runtime_get_sync(wl->dev);
4732	if (ret < 0) {
4733		pm_runtime_put_noidle(wl->dev);
4734		goto out;
4735	}
4736
4737	wl12xx_for_each_wlvif(wl, wlvif) {
4738		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4739
4740		rcu_read_lock();
4741		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4742			rcu_read_unlock();
4743			continue;
4744		}
4745		rcu_read_unlock();
4746
4747		/* start radar if needed */
4748		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4749		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4750		    ctx->radar_enabled && !wlvif->radar_enabled &&
4751		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4752			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4753			wlcore_hw_set_cac(wl, wlvif, true);
4754			wlvif->radar_enabled = true;
4755		}
4756	}
4757
4758	pm_runtime_mark_last_busy(wl->dev);
4759	pm_runtime_put_autosuspend(wl->dev);
4760out:
4761	mutex_unlock(&wl->mutex);
4762}
4763
4764static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4765					struct ieee80211_vif *vif,
4766					struct ieee80211_chanctx_conf *ctx)
4767{
4768	struct wl1271 *wl = hw->priv;
4769	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4770	int channel = ieee80211_frequency_to_channel(
4771		ctx->def.chan->center_freq);
4772	int ret = -EINVAL;
4773
4774	wl1271_debug(DEBUG_MAC80211,
4775		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4776		     wlvif->role_id, channel,
4777		     cfg80211_get_chandef_type(&ctx->def),
4778		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4779
4780	mutex_lock(&wl->mutex);
4781
4782	if (unlikely(wl->state != WLCORE_STATE_ON))
4783		goto out;
4784
4785	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4786		goto out;
4787
4788	ret = pm_runtime_get_sync(wl->dev);
4789	if (ret < 0) {
4790		pm_runtime_put_noidle(wl->dev);
4791		goto out;
4792	}
4793
4794	wlvif->band = ctx->def.chan->band;
4795	wlvif->channel = channel;
4796	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4797
4798	/* update default rates according to the band */
4799	wl1271_set_band_rate(wl, wlvif);
4800
4801	if (ctx->radar_enabled &&
4802	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4803		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4804		wlcore_hw_set_cac(wl, wlvif, true);
4805		wlvif->radar_enabled = true;
4806	}
4807
4808	pm_runtime_mark_last_busy(wl->dev);
4809	pm_runtime_put_autosuspend(wl->dev);
4810out:
4811	mutex_unlock(&wl->mutex);
4812
4813	return 0;
4814}
4815
4816static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4817					   struct ieee80211_vif *vif,
4818					   struct ieee80211_chanctx_conf *ctx)
4819{
4820	struct wl1271 *wl = hw->priv;
4821	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4822	int ret;
4823
4824	wl1271_debug(DEBUG_MAC80211,
4825		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4826		     wlvif->role_id,
4827		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4828		     cfg80211_get_chandef_type(&ctx->def));
4829
4830	wl1271_tx_flush(wl);
4831
4832	mutex_lock(&wl->mutex);
4833
4834	if (unlikely(wl->state != WLCORE_STATE_ON))
4835		goto out;
4836
4837	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4838		goto out;
4839
4840	ret = pm_runtime_get_sync(wl->dev);
4841	if (ret < 0) {
4842		pm_runtime_put_noidle(wl->dev);
4843		goto out;
4844	}
4845
4846	if (wlvif->radar_enabled) {
4847		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4848		wlcore_hw_set_cac(wl, wlvif, false);
4849		wlvif->radar_enabled = false;
4850	}
4851
4852	pm_runtime_mark_last_busy(wl->dev);
4853	pm_runtime_put_autosuspend(wl->dev);
4854out:
4855	mutex_unlock(&wl->mutex);
4856}
4857
4858static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4859				    struct wl12xx_vif *wlvif,
4860				    struct ieee80211_chanctx_conf *new_ctx)
4861{
4862	int channel = ieee80211_frequency_to_channel(
4863		new_ctx->def.chan->center_freq);
4864
4865	wl1271_debug(DEBUG_MAC80211,
4866		     "switch vif (role %d) %d -> %d chan_type: %d",
4867		     wlvif->role_id, wlvif->channel, channel,
4868		     cfg80211_get_chandef_type(&new_ctx->def));
4869
4870	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4871		return 0;
4872
4873	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4874
4875	if (wlvif->radar_enabled) {
4876		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4877		wlcore_hw_set_cac(wl, wlvif, false);
4878		wlvif->radar_enabled = false;
4879	}
4880
4881	wlvif->band = new_ctx->def.chan->band;
4882	wlvif->channel = channel;
4883	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4884
4885	/* start radar if needed */
4886	if (new_ctx->radar_enabled) {
4887		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4888		wlcore_hw_set_cac(wl, wlvif, true);
4889		wlvif->radar_enabled = true;
4890	}
4891
4892	return 0;
4893}
4894
4895static int
4896wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4897			     struct ieee80211_vif_chanctx_switch *vifs,
4898			     int n_vifs,
4899			     enum ieee80211_chanctx_switch_mode mode)
4900{
4901	struct wl1271 *wl = hw->priv;
4902	int i, ret;
4903
4904	wl1271_debug(DEBUG_MAC80211,
4905		     "mac80211 switch chanctx n_vifs %d mode %d",
4906		     n_vifs, mode);
4907
4908	mutex_lock(&wl->mutex);
4909
4910	ret = pm_runtime_get_sync(wl->dev);
4911	if (ret < 0) {
4912		pm_runtime_put_noidle(wl->dev);
4913		goto out;
4914	}
4915
4916	for (i = 0; i < n_vifs; i++) {
4917		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4918
4919		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4920		if (ret)
4921			goto out_sleep;
4922	}
4923out_sleep:
4924	pm_runtime_mark_last_busy(wl->dev);
4925	pm_runtime_put_autosuspend(wl->dev);
4926out:
4927	mutex_unlock(&wl->mutex);
4928
4929	return 0;
4930}
4931
4932static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4933			     struct ieee80211_vif *vif, u16 queue,
4934			     const struct ieee80211_tx_queue_params *params)
4935{
4936	struct wl1271 *wl = hw->priv;
4937	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4938	u8 ps_scheme;
4939	int ret = 0;
4940
4941	if (wlcore_is_p2p_mgmt(wlvif))
4942		return 0;
4943
4944	mutex_lock(&wl->mutex);
4945
4946	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4947
4948	if (params->uapsd)
4949		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4950	else
4951		ps_scheme = CONF_PS_SCHEME_LEGACY;
4952
4953	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4954		goto out;
4955
4956	ret = pm_runtime_get_sync(wl->dev);
4957	if (ret < 0) {
4958		pm_runtime_put_noidle(wl->dev);
4959		goto out;
4960	}
4961
4962	/*
4963	 * mac80211 passes the txop in units of 32us; the firmware expects
4964	 * microseconds, hence the << 5 below.
4965	 */
4966	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4967				params->cw_min, params->cw_max,
4968				params->aifs, params->txop << 5);
4969	if (ret < 0)
4970		goto out_sleep;
4971
4972	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4973				 CONF_CHANNEL_TYPE_EDCF,
4974				 wl1271_tx_get_queue(queue),
4975				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4976				 0, 0);
4977
4978out_sleep:
4979	pm_runtime_mark_last_busy(wl->dev);
4980	pm_runtime_put_autosuspend(wl->dev);
4981
4982out:
4983	mutex_unlock(&wl->mutex);
4984
4985	return ret;
4986}
4987
4988static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4989			     struct ieee80211_vif *vif)
4990{
4991
4992	struct wl1271 *wl = hw->priv;
4993	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4994	u64 mactime = ULLONG_MAX;
4995	int ret;
4996
4997	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4998
4999	mutex_lock(&wl->mutex);
5000
5001	if (unlikely(wl->state != WLCORE_STATE_ON))
5002		goto out;
5003
5004	ret = pm_runtime_get_sync(wl->dev);
5005	if (ret < 0) {
5006		pm_runtime_put_noidle(wl->dev);
5007		goto out;
5008	}
5009
5010	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5011	if (ret < 0)
5012		goto out_sleep;
5013
5014out_sleep:
5015	pm_runtime_mark_last_busy(wl->dev);
5016	pm_runtime_put_autosuspend(wl->dev);
5017
5018out:
5019	mutex_unlock(&wl->mutex);
5020	return mactime;
5021}
5022
5023static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5024				struct survey_info *survey)
5025{
5026	struct ieee80211_conf *conf = &hw->conf;
5027
5028	if (idx != 0)
5029		return -ENOENT;
5030
5031	survey->channel = conf->chandef.chan;
5032	survey->filled = 0;
5033	return 0;
5034}
5035
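/*
 * Reserve a host link ID (HLID) for a new AP-mode station. Fails with
 * -EBUSY when either the per-AP station limit or the global link limit
 * has been reached.
 */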
5036static int wl1271_allocate_sta(struct wl1271 *wl,
5037			     struct wl12xx_vif *wlvif,
5038			     struct ieee80211_sta *sta)
5039{
5040	struct wl1271_station *wl_sta;
5041	int ret;
5042
5043
5044	if (wl->active_sta_count >= wl->max_ap_stations) {
5045		wl1271_warning("could not allocate HLID - too many stations");
5046		return -EBUSY;
5047	}
5048
5049	wl_sta = (struct wl1271_station *)sta->drv_priv;
5050	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5051	if (ret < 0) {
5052		wl1271_warning("could not allocate HLID - too many links");
5053		return -EBUSY;
5054	}
5055
5056	/* use the previous security seq, if this is a recovery/resume */
5057	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5058
5059	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5060	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5061	wl->active_sta_count++;
5062	return 0;
5063}
5064
5065void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5066{
5067	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5068		return;
5069
5070	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5071	__clear_bit(hlid, &wl->ap_ps_map);
5072	__clear_bit(hlid, &wl->ap_fw_ps_map);
5073
5074	/*
5075	 * save the last used PN in the private part of ieee80211_sta,
5076	 * in case of recovery/suspend
5077	 */
5078	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5079
5080	wl12xx_free_link(wl, wlvif, &hlid);
5081	wl->active_sta_count--;
5082
5083	/*
5084	 * rearm the tx watchdog when the last STA is freed - give the FW a
5085	 * chance to return STA-buffered packets before complaining.
5086	 */
5087	if (wl->active_sta_count == 0)
5088		wl12xx_rearm_tx_watchdog_locked(wl);
5089}
5090
5091static int wl12xx_sta_add(struct wl1271 *wl,
5092			  struct wl12xx_vif *wlvif,
5093			  struct ieee80211_sta *sta)
5094{
5095	struct wl1271_station *wl_sta;
5096	int ret = 0;
5097	u8 hlid;
5098
5099	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5100
5101	ret = wl1271_allocate_sta(wl, wlvif, sta);
5102	if (ret < 0)
5103		return ret;
5104
5105	wl_sta = (struct wl1271_station *)sta->drv_priv;
5106	hlid = wl_sta->hlid;
5107
5108	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5109	if (ret < 0)
5110		wl1271_free_sta(wl, wlvif, hlid);
5111
5112	return ret;
5113}
5114
5115static int wl12xx_sta_remove(struct wl1271 *wl,
5116			     struct wl12xx_vif *wlvif,
5117			     struct ieee80211_sta *sta)
5118{
5119	struct wl1271_station *wl_sta;
5120	int ret = 0, id;
5121
5122	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5123
5124	wl_sta = (struct wl1271_station *)sta->drv_priv;
5125	id = wl_sta->hlid;
5126	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5127		return -EINVAL;
5128
5129	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5130	if (ret < 0)
5131		return ret;
5132
5133	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5134	return ret;
5135}
5136
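/*
 * Start a ROC on the vif's own role, but only if no other ROC is
 * currently active (i.e. the roc_map is empty).
 */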
5137static void wlcore_roc_if_possible(struct wl1271 *wl,
5138				   struct wl12xx_vif *wlvif)
5139{
5140	if (find_first_bit(wl->roc_map,
5141			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5142		return;
5143
5144	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5145		return;
5146
5147	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5148}
5149
5150/*
5151 * when wl_sta is NULL, we treat this call as if coming from a
5152 * pending auth reply.
5153 * wl->mutex must be taken and the FW must be awake when the call
5154 * takes place.
5155 */
5156void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5157			      struct wl1271_station *wl_sta, bool in_conn)
5158{
5159	if (in_conn) {
5160		if (WARN_ON(wl_sta && wl_sta->in_connection))
5161			return;
5162
5163		if (!wlvif->ap_pending_auth_reply &&
5164		    !wlvif->inconn_count)
5165			wlcore_roc_if_possible(wl, wlvif);
5166
5167		if (wl_sta) {
5168			wl_sta->in_connection = true;
5169			wlvif->inconn_count++;
5170		} else {
5171			wlvif->ap_pending_auth_reply = true;
5172		}
5173	} else {
5174		if (wl_sta && !wl_sta->in_connection)
5175			return;
5176
5177		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5178			return;
5179
5180		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5181			return;
5182
5183		if (wl_sta) {
5184			wl_sta->in_connection = false;
5185			wlvif->inconn_count--;
5186		} else {
5187			wlvif->ap_pending_auth_reply = false;
5188		}
5189
5190		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5191		    test_bit(wlvif->role_id, wl->roc_map))
5192			wl12xx_croc(wl, wlvif->role_id);
5193	}
5194}
5195
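/*
 * Translate mac80211 station state transitions into firmware commands:
 * add/remove and authorize peers in AP mode, save/restore the TX security
 * sequence counter across disassoc/assoc in STA mode, and manage the ROC
 * used while a STA connection is being established.
 */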
5196static int wl12xx_update_sta_state(struct wl1271 *wl,
5197				   struct wl12xx_vif *wlvif,
5198				   struct ieee80211_sta *sta,
5199				   enum ieee80211_sta_state old_state,
5200				   enum ieee80211_sta_state new_state)
5201{
5202	struct wl1271_station *wl_sta;
5203	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5204	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5205	int ret;
5206
5207	wl_sta = (struct wl1271_station *)sta->drv_priv;
5208
5209	/* Add station (AP mode) */
5210	if (is_ap &&
5211	    old_state == IEEE80211_STA_NOTEXIST &&
5212	    new_state == IEEE80211_STA_NONE) {
5213		ret = wl12xx_sta_add(wl, wlvif, sta);
5214		if (ret)
5215			return ret;
5216
5217		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5218	}
5219
5220	/* Remove station (AP mode) */
5221	if (is_ap &&
5222	    old_state == IEEE80211_STA_NONE &&
5223	    new_state == IEEE80211_STA_NOTEXIST) {
5224		/* must not fail */
5225		wl12xx_sta_remove(wl, wlvif, sta);
5226
5227		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5228	}
5229
5230	/* Authorize station (AP mode) */
5231	if (is_ap &&
5232	    new_state == IEEE80211_STA_AUTHORIZED) {
5233		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5234		if (ret < 0)
5235			return ret;
5236
5237		/* reconfigure rates */
5238		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5239		if (ret < 0)
5240			return ret;
5241
5242		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
 
5243						     wl_sta->hlid);
5244		if (ret)
5245			return ret;
5246
5247		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5248	}
5249
5250	/* Authorize station */
5251	if (is_sta &&
5252	    new_state == IEEE80211_STA_AUTHORIZED) {
5253		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5254		ret = wl12xx_set_authorized(wl, wlvif);
5255		if (ret)
5256			return ret;
5257	}
5258
5259	if (is_sta &&
5260	    old_state == IEEE80211_STA_AUTHORIZED &&
5261	    new_state == IEEE80211_STA_ASSOC) {
5262		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5263		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5264	}
5265
5266	/* save seq number on disassoc (suspend) */
5267	if (is_sta &&
5268	    old_state == IEEE80211_STA_ASSOC &&
5269	    new_state == IEEE80211_STA_AUTH) {
5270		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5271		wlvif->total_freed_pkts = 0;
5272	}
5273
5274	/* restore seq number on assoc (resume) */
5275	if (is_sta &&
5276	    old_state == IEEE80211_STA_AUTH &&
5277	    new_state == IEEE80211_STA_ASSOC) {
5278		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5279	}
5280
5281	/* clear ROCs on failure or authorization */
5282	if (is_sta &&
5283	    (new_state == IEEE80211_STA_AUTHORIZED ||
5284	     new_state == IEEE80211_STA_NOTEXIST)) {
5285		if (test_bit(wlvif->role_id, wl->roc_map))
5286			wl12xx_croc(wl, wlvif->role_id);
5287	}
5288
5289	if (is_sta &&
5290	    old_state == IEEE80211_STA_NOTEXIST &&
5291	    new_state == IEEE80211_STA_NONE) {
5292		if (find_first_bit(wl->roc_map,
5293				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5294			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5295			wl12xx_roc(wl, wlvif, wlvif->role_id,
5296				   wlvif->band, wlvif->channel);
5297		}
5298	}
5299	return 0;
5300}
5301
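/*
 * mac80211 sta_state callback: wraps wl12xx_update_sta_state() with
 * wl->mutex and a runtime PM reference. Downward transitions always
 * report success so mac80211 can tear the station down.
 */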
5302static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5303			       struct ieee80211_vif *vif,
5304			       struct ieee80211_sta *sta,
5305			       enum ieee80211_sta_state old_state,
5306			       enum ieee80211_sta_state new_state)
5307{
5308	struct wl1271 *wl = hw->priv;
5309	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5310	int ret;
5311
5312	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5313		     sta->aid, old_state, new_state);
5314
5315	mutex_lock(&wl->mutex);
5316
5317	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5318		ret = -EBUSY;
5319		goto out;
5320	}
5321
5322	ret = pm_runtime_get_sync(wl->dev);
5323	if (ret < 0) {
5324		pm_runtime_put_noidle(wl->dev);
5325		goto out;
5326	}
5327
5328	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5329
5330	pm_runtime_mark_last_busy(wl->dev);
5331	pm_runtime_put_autosuspend(wl->dev);
5332out:
5333	mutex_unlock(&wl->mutex);
5334	if (new_state < old_state)
5335		return 0;
5336	return ret;
5337}
5338
5339static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5340				  struct ieee80211_vif *vif,
5341				  struct ieee80211_ampdu_params *params)
5342{
5343	struct wl1271 *wl = hw->priv;
5344	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5345	int ret;
5346	u8 hlid, *ba_bitmap;
5347	struct ieee80211_sta *sta = params->sta;
5348	enum ieee80211_ampdu_mlme_action action = params->action;
5349	u16 tid = params->tid;
5350	u16 *ssn = &params->ssn;
5351
5352	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5353		     tid);
5354
5355	/* sanity check - the fields in FW are only 8bits wide */
5356	if (WARN_ON(tid > 0xFF))
5357		return -ENOTSUPP;
5358
5359	mutex_lock(&wl->mutex);
5360
5361	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5362		ret = -EAGAIN;
5363		goto out;
5364	}
5365
5366	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5367		hlid = wlvif->sta.hlid;
5368	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5369		struct wl1271_station *wl_sta;
5370
5371		wl_sta = (struct wl1271_station *)sta->drv_priv;
5372		hlid = wl_sta->hlid;
5373	} else {
5374		ret = -EINVAL;
5375		goto out;
5376	}
5377
5378	ba_bitmap = &wl->links[hlid].ba_bitmap;
5379
5380	ret = pm_runtime_get_sync(wl->dev);
5381	if (ret < 0) {
5382		pm_runtime_put_noidle(wl->dev);
5383		goto out;
5384	}
5385
5386	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5387		     tid, action);
5388
5389	switch (action) {
5390	case IEEE80211_AMPDU_RX_START:
5391		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5392			ret = -ENOTSUPP;
5393			break;
5394		}
5395
5396		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5397			ret = -EBUSY;
5398			wl1271_error("exceeded max RX BA sessions");
5399			break;
5400		}
5401
5402		if (*ba_bitmap & BIT(tid)) {
5403			ret = -EINVAL;
5404			wl1271_error("cannot enable RX BA session on active "
5405				     "tid: %d", tid);
5406			break;
5407		}
5408
5409		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5410				hlid,
5411				params->buf_size);
5412
5413		if (!ret) {
5414			*ba_bitmap |= BIT(tid);
5415			wl->ba_rx_session_count++;
5416		}
5417		break;
5418
5419	case IEEE80211_AMPDU_RX_STOP:
5420		if (!(*ba_bitmap & BIT(tid))) {
5421			/*
5422			 * this happens on reconfig - so only output a debug
5423			 * message for now, and don't fail the function.
5424			 */
5425			wl1271_debug(DEBUG_MAC80211,
5426				     "no active RX BA session on tid: %d",
5427				     tid);
5428			ret = 0;
5429			break;
5430		}
5431
5432		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5433							 hlid, 0);
5434		if (!ret) {
5435			*ba_bitmap &= ~BIT(tid);
5436			wl->ba_rx_session_count--;
5437		}
5438		break;
5439
5440	/*
5441	 * BA initiator (TX) sessions are managed by the FW independently,
5442	 * so all TX AMPDU actions are rejected here on purpose.
5443	 */
5444	case IEEE80211_AMPDU_TX_START:
5445	case IEEE80211_AMPDU_TX_STOP_CONT:
5446	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5447	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5448	case IEEE80211_AMPDU_TX_OPERATIONAL:
5449		ret = -EINVAL;
5450		break;
5451
5452	default:
5453		wl1271_error("Incorrect ampdu action id=%x\n", action);
5454		ret = -EINVAL;
5455	}
5456
5457	pm_runtime_mark_last_busy(wl->dev);
5458	pm_runtime_put_autosuspend(wl->dev);
5459
5460out:
5461	mutex_unlock(&wl->mutex);
5462
5463	return ret;
5464}
5465
5466static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5467				   struct ieee80211_vif *vif,
5468				   const struct cfg80211_bitrate_mask *mask)
5469{
5470	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5471	struct wl1271 *wl = hw->priv;
5472	int i, ret = 0;
5473
5474	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5475		mask->control[NL80211_BAND_2GHZ].legacy,
5476		mask->control[NL80211_BAND_5GHZ].legacy);
5477
5478	mutex_lock(&wl->mutex);
5479
5480	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5481		wlvif->bitrate_masks[i] =
5482			wl1271_tx_enabled_rates_get(wl,
5483						    mask->control[i].legacy,
5484						    i);
5485
5486	if (unlikely(wl->state != WLCORE_STATE_ON))
5487		goto out;
5488
5489	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5490	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5491
5492		ret = pm_runtime_get_sync(wl->dev);
5493		if (ret < 0) {
5494			pm_runtime_put_noidle(wl->dev);
5495			goto out;
5496		}
5497
5498		wl1271_set_band_rate(wl, wlvif);
5499		wlvif->basic_rate =
5500			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5501		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5502
5503		pm_runtime_mark_last_busy(wl->dev);
5504		pm_runtime_put_autosuspend(wl->dev);
5505	}
5506out:
5507	mutex_unlock(&wl->mutex);
5508
5509	return ret;
5510}
5511
5512static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5513				     struct ieee80211_vif *vif,
5514				     struct ieee80211_channel_switch *ch_switch)
5515{
5516	struct wl1271 *wl = hw->priv;
5517	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5518	int ret;
5519
5520	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5521
5522	wl1271_tx_flush(wl);
5523
5524	mutex_lock(&wl->mutex);
5525
5526	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5527		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5528			ieee80211_chswitch_done(vif, false);
5529		goto out;
5530	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5531		goto out;
5532	}
5533
5534	ret = pm_runtime_get_sync(wl->dev);
5535	if (ret < 0) {
5536		pm_runtime_put_noidle(wl->dev);
5537		goto out;
5538	}
5539
5540	/* TODO: change mac80211 to pass vif as param */
5541
5542	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5543		unsigned long delay_usec;
5544
5545		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5546		if (ret)
5547			goto out_sleep;
5548
5549		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5550
5551		/* indicate failure 5 seconds after channel switch time */
5552		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5553			ch_switch->count;
5554		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5555					     usecs_to_jiffies(delay_usec) +
5556					     msecs_to_jiffies(5000));
5557	}
5558
5559out_sleep:
5560	pm_runtime_mark_last_busy(wl->dev);
5561	pm_runtime_put_autosuspend(wl->dev);
5562
5563out:
5564	mutex_unlock(&wl->mutex);
5565}
5566
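/*
 * Look up an information element in our own beacon template. Used below
 * to read back the CSA countdown for channel_switch_beacon.
 */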
5567static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5568					struct wl12xx_vif *wlvif,
5569					u8 eid)
5570{
5571	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5572	struct sk_buff *beacon =
5573		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5574
5575	if (!beacon)
5576		return NULL;
5577
5578	return cfg80211_find_ie(eid,
5579				beacon->data + ieoffset,
5580				beacon->len - ieoffset);
5581}
5582
5583static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5584				u8 *csa_count)
5585{
5586	const u8 *ie;
5587	const struct ieee80211_channel_sw_ie *ie_csa;
5588
5589	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5590	if (!ie)
5591		return -EINVAL;
5592
5593	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5594	*csa_count = ie_csa->count;
5595
5596	return 0;
5597}
5598
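/*
 * AP-side channel switch: read the CSA countdown from our own beacon and
 * hand the switch over to the chip-specific channel_switch op.
 */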
5599static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5600					    struct ieee80211_vif *vif,
5601					    struct cfg80211_chan_def *chandef)
5602{
5603	struct wl1271 *wl = hw->priv;
5604	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5605	struct ieee80211_channel_switch ch_switch = {
5606		.block_tx = true,
5607		.chandef = *chandef,
5608	};
5609	int ret;
5610
5611	wl1271_debug(DEBUG_MAC80211,
5612		     "mac80211 channel switch beacon (role %d)",
5613		     wlvif->role_id);
5614
5615	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5616	if (ret < 0) {
5617		wl1271_error("error getting beacon (for CSA counter)");
5618		return;
5619	}
5620
5621	mutex_lock(&wl->mutex);
5622
5623	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5624		ret = -EBUSY;
5625		goto out;
5626	}
5627
5628	ret = pm_runtime_get_sync(wl->dev);
5629	if (ret < 0) {
5630		pm_runtime_put_noidle(wl->dev);
5631		goto out;
5632	}
5633
5634	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5635	if (ret)
5636		goto out_sleep;
5637
5638	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5639
5640out_sleep:
5641	pm_runtime_mark_last_busy(wl->dev);
5642	pm_runtime_put_autosuspend(wl->dev);
5643out:
5644	mutex_unlock(&wl->mutex);
5645}
5646
5647static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5648			    u32 queues, bool drop)
5649{
5650	struct wl1271 *wl = hw->priv;
5651
5652	wl1271_tx_flush(wl);
5653}
5654
5655static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5656				       struct ieee80211_vif *vif,
5657				       struct ieee80211_channel *chan,
5658				       int duration,
5659				       enum ieee80211_roc_type type)
5660{
5661	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5662	struct wl1271 *wl = hw->priv;
5663	int channel, active_roc, ret = 0;
5664
5665	channel = ieee80211_frequency_to_channel(chan->center_freq);
5666
5667	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5668		     channel, wlvif->role_id);
5669
5670	mutex_lock(&wl->mutex);
5671
5672	if (unlikely(wl->state != WLCORE_STATE_ON))
5673		goto out;
5674
5675	/* return EBUSY if we can't ROC right now */
5676	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5677	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5678		wl1271_warning("active roc on role %d", active_roc);
5679		ret = -EBUSY;
5680		goto out;
5681	}
5682
5683	ret = pm_runtime_get_sync(wl->dev);
5684	if (ret < 0) {
5685		pm_runtime_put_noidle(wl->dev);
5686		goto out;
5687	}
5688
5689	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5690	if (ret < 0)
5691		goto out_sleep;
5692
5693	wl->roc_vif = vif;
5694	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5695				     msecs_to_jiffies(duration));
5696out_sleep:
5697	pm_runtime_mark_last_busy(wl->dev);
5698	pm_runtime_put_autosuspend(wl->dev);
5699out:
5700	mutex_unlock(&wl->mutex);
5701	return ret;
5702}
5703
5704static int __wlcore_roc_completed(struct wl1271 *wl)
5705{
5706	struct wl12xx_vif *wlvif;
5707	int ret;
5708
5709	/* already completed */
5710	if (unlikely(!wl->roc_vif))
5711		return 0;
5712
5713	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5714
5715	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5716		return -EBUSY;
5717
5718	ret = wl12xx_stop_dev(wl, wlvif);
5719	if (ret < 0)
5720		return ret;
5721
5722	wl->roc_vif = NULL;
5723
5724	return 0;
5725}
5726
5727static int wlcore_roc_completed(struct wl1271 *wl)
5728{
5729	int ret;
5730
5731	wl1271_debug(DEBUG_MAC80211, "roc complete");
5732
5733	mutex_lock(&wl->mutex);
5734
5735	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5736		ret = -EBUSY;
5737		goto out;
5738	}
5739
5740	ret = pm_runtime_get_sync(wl->dev);
5741	if (ret < 0) {
5742		pm_runtime_put_noidle(wl->dev);
5743		goto out;
5744	}
5745
5746	ret = __wlcore_roc_completed(wl);
5747
5748	pm_runtime_mark_last_busy(wl->dev);
5749	pm_runtime_put_autosuspend(wl->dev);
5750out:
5751	mutex_unlock(&wl->mutex);
5752
5753	return ret;
5754}
5755
5756static void wlcore_roc_complete_work(struct work_struct *work)
5757{
5758	struct delayed_work *dwork;
5759	struct wl1271 *wl;
5760	int ret;
5761
5762	dwork = to_delayed_work(work);
5763	wl = container_of(dwork, struct wl1271, roc_complete_work);
5764
5765	ret = wlcore_roc_completed(wl);
5766	if (!ret)
5767		ieee80211_remain_on_channel_expired(wl->hw);
5768}
5769
5770static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5771					      struct ieee80211_vif *vif)
5772{
5773	struct wl1271 *wl = hw->priv;
5774
5775	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5776
5777	/* TODO: per-vif */
5778	wl1271_tx_flush(wl);
5779
5780	/*
5781	 * we can't just flush_work here, because it might deadlock
5782	 * (as we might get called from the same workqueue)
5783	 */
5784	cancel_delayed_work_sync(&wl->roc_complete_work);
5785	wlcore_roc_completed(wl);
5786
5787	return 0;
5788}
5789
5790static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5791				    struct ieee80211_vif *vif,
5792				    struct ieee80211_sta *sta,
5793				    u32 changed)
5794{
5795	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5796
5797	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5798
5799	if (!(changed & IEEE80211_RC_BW_CHANGED))
5800		return;
5801
5802	/* this callback is atomic, so schedule a new work */
5803	wlvif->rc_update_bw = sta->bandwidth;
5804	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5805	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5806}
5807
5808static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5809				     struct ieee80211_vif *vif,
5810				     struct ieee80211_sta *sta,
5811				     struct station_info *sinfo)
5812{
5813	struct wl1271 *wl = hw->priv;
5814	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5815	s8 rssi_dbm;
5816	int ret;
5817
5818	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5819
5820	mutex_lock(&wl->mutex);
5821
5822	if (unlikely(wl->state != WLCORE_STATE_ON))
5823		goto out;
5824
5825	ret = pm_runtime_get_sync(wl->dev);
5826	if (ret < 0) {
5827		pm_runtime_put_noidle(wl->dev);
5828		goto out_sleep;
5829	}
5830
5831	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5832	if (ret < 0)
5833		goto out_sleep;
5834
5835	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5836	sinfo->signal = rssi_dbm;
5837
5838out_sleep:
5839	pm_runtime_mark_last_busy(wl->dev);
5840	pm_runtime_put_autosuspend(wl->dev);
5841
5842out:
5843	mutex_unlock(&wl->mutex);
5844}
5845
5846static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5847					     struct ieee80211_sta *sta)
5848{
5849	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5850	struct wl1271 *wl = hw->priv;
5851	u8 hlid = wl_sta->hlid;
5852
5853	/* return in units of Kbps */
5854	return (wl->links[hlid].fw_rate_mbps * 1000);
5855}
5856
5857static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5858{
5859	struct wl1271 *wl = hw->priv;
5860	bool ret = false;
5861
5862	mutex_lock(&wl->mutex);
5863
5864	if (unlikely(wl->state != WLCORE_STATE_ON))
5865		goto out;
5866
5867	/* packets are considered pending if in the TX queue or the FW */
5868	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5869out:
5870	mutex_unlock(&wl->mutex);
5871
5872	return ret;
5873}
5874
5875/* can't be const, mac80211 writes to this */
5876static struct ieee80211_rate wl1271_rates[] = {
5877	{ .bitrate = 10,
5878	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5879	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5880	{ .bitrate = 20,
5881	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5882	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5883	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5884	{ .bitrate = 55,
5885	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5886	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5887	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5888	{ .bitrate = 110,
5889	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5890	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5891	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5892	{ .bitrate = 60,
5893	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5894	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5895	{ .bitrate = 90,
5896	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5897	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5898	{ .bitrate = 120,
5899	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5900	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5901	{ .bitrate = 180,
5902	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5903	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5904	{ .bitrate = 240,
5905	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5906	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5907	{ .bitrate = 360,
5908	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5909	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5910	{ .bitrate = 480,
5911	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5912	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5913	{ .bitrate = 540,
5914	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5915	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5916};
5917
5918/* can't be const, mac80211 writes to this */
5919static struct ieee80211_channel wl1271_channels[] = {
5920	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5921	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5922	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5923	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5924	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5925	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5926	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5927	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5928	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5929	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5930	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5931	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5932	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5933	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5934};
5935
5936/* can't be const, mac80211 writes to this */
5937static struct ieee80211_supported_band wl1271_band_2ghz = {
5938	.channels = wl1271_channels,
5939	.n_channels = ARRAY_SIZE(wl1271_channels),
5940	.bitrates = wl1271_rates,
5941	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5942};
5943
5944/* 5 GHz data rates for WL1273 */
5945static struct ieee80211_rate wl1271_rates_5ghz[] = {
5946	{ .bitrate = 60,
5947	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5948	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5949	{ .bitrate = 90,
5950	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5951	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5952	{ .bitrate = 120,
5953	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5954	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5955	{ .bitrate = 180,
5956	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5957	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5958	{ .bitrate = 240,
5959	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5960	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5961	{ .bitrate = 360,
5962	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5963	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5964	{ .bitrate = 480,
5965	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5966	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5967	{ .bitrate = 540,
5968	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5969	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5970};
5971
5972/* 5 GHz band channels for WL1273 */
5973static struct ieee80211_channel wl1271_channels_5ghz[] = {
5974	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5975	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5976	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5977	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5978	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5979	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5980	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5981	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5982	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5983	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5984	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5985	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5986	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5987	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5988	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5989	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5990	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5991	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5992	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5993	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5994	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5995	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5996	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5997	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5998	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5999	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
6000	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
6001	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
6002	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
6003	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
6004	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6005};
6006
6007static struct ieee80211_supported_band wl1271_band_5ghz = {
6008	.channels = wl1271_channels_5ghz,
6009	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6010	.bitrates = wl1271_rates_5ghz,
6011	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6012};
6013
6014static const struct ieee80211_ops wl1271_ops = {
6015	.start = wl1271_op_start,
6016	.stop = wlcore_op_stop,
6017	.add_interface = wl1271_op_add_interface,
6018	.remove_interface = wl1271_op_remove_interface,
6019	.change_interface = wl12xx_op_change_interface,
6020#ifdef CONFIG_PM
6021	.suspend = wl1271_op_suspend,
6022	.resume = wl1271_op_resume,
6023#endif
6024	.config = wl1271_op_config,
6025	.prepare_multicast = wl1271_op_prepare_multicast,
6026	.configure_filter = wl1271_op_configure_filter,
6027	.tx = wl1271_op_tx,
6028	.set_key = wlcore_op_set_key,
6029	.hw_scan = wl1271_op_hw_scan,
6030	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6031	.sched_scan_start = wl1271_op_sched_scan_start,
6032	.sched_scan_stop = wl1271_op_sched_scan_stop,
6033	.bss_info_changed = wl1271_op_bss_info_changed,
6034	.set_frag_threshold = wl1271_op_set_frag_threshold,
6035	.set_rts_threshold = wl1271_op_set_rts_threshold,
6036	.conf_tx = wl1271_op_conf_tx,
6037	.get_tsf = wl1271_op_get_tsf,
6038	.get_survey = wl1271_op_get_survey,
6039	.sta_state = wl12xx_op_sta_state,
6040	.ampdu_action = wl1271_op_ampdu_action,
6041	.tx_frames_pending = wl1271_tx_frames_pending,
6042	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6043	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6044	.channel_switch = wl12xx_op_channel_switch,
6045	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6046	.flush = wlcore_op_flush,
6047	.remain_on_channel = wlcore_op_remain_on_channel,
6048	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6049	.add_chanctx = wlcore_op_add_chanctx,
6050	.remove_chanctx = wlcore_op_remove_chanctx,
6051	.change_chanctx = wlcore_op_change_chanctx,
6052	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6053	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6054	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6055	.sta_rc_update = wlcore_op_sta_rc_update,
6056	.sta_statistics = wlcore_op_sta_statistics,
6057	.get_expected_throughput = wlcore_op_get_expected_throughput,
6058	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6059};
6060
6061
6062u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6063{
6064	u8 idx;
6065
6066	BUG_ON(band >= 2);
6067
6068	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6069		wl1271_error("Illegal RX rate from HW: %d", rate);
6070		return 0;
6071	}
6072
6073	idx = wl->band_rate_to_idx[band][rate];
6074	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6075		wl1271_error("Unsupported RX rate from HW: %d", rate);
6076		return 0;
6077	}
6078
6079	return idx;
6080}
6081
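/*
 * Build the wiphy address list from a base OUI/NIC pair, incrementing the
 * NIC part per address. If the chip provides fewer addresses than
 * WLCORE_NUM_MAC_ADDRESSES, the last slot is filled with a
 * locally-administered copy of the first address.
 */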
6082static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6083{
6084	int i;
6085
6086	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6087		     oui, nic);
6088
6089	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6090		wl1271_warning("NIC part of the MAC address wraps around!");
6091
6092	for (i = 0; i < wl->num_mac_addr; i++) {
6093		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6094		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6095		wl->addresses[i].addr[2] = (u8) oui;
6096		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6097		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6098		wl->addresses[i].addr[5] = (u8) nic;
6099		nic++;
6100	}
6101
6102	/* we may be at most one address short */
6103	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6104
6105	/*
6106	 * turn on the LAA bit in the first address and use it as
6107	 * the last address.
6108	 */
6109	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6110		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6111		memcpy(&wl->addresses[idx], &wl->addresses[0],
6112		       sizeof(wl->addresses[0]));
6113		/* LAA bit */
6114		wl->addresses[idx].addr[0] |= BIT(1);
6115	}
6116
6117	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6118	wl->hw->wiphy->addresses = wl->addresses;
6119}
6120
6121static int wl12xx_get_hw_info(struct wl1271 *wl)
6122{
6123	int ret;
6124
6125	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6126	if (ret < 0)
6127		goto out;
6128
6129	wl->fuse_oui_addr = 0;
6130	wl->fuse_nic_addr = 0;
6131
6132	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6133	if (ret < 0)
6134		goto out;
6135
6136	if (wl->ops->get_mac)
6137		ret = wl->ops->get_mac(wl);
6138
6139out:
6140	return ret;
6141}
6142
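/*
 * Register with mac80211 (once). The MAC address is taken from the NVS;
 * if it is zeroed or holds the well-known default (de:ad:be:ef:00:00),
 * fall back to the fuse-programmed address, or to the TI OUI with a
 * random NIC part as a last resort.
 */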
6143static int wl1271_register_hw(struct wl1271 *wl)
6144{
6145	int ret;
6146	u32 oui_addr = 0, nic_addr = 0;
6147	struct platform_device *pdev = wl->pdev;
6148	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6149
6150	if (wl->mac80211_registered)
6151		return 0;
6152
6153	if (wl->nvs_len >= 12) {
6154		/* NOTE: to simplify the casting we assume the wl->nvs->nvs
6155		 * element is first, i.e. at the beginning of the wl->nvs
6156		 * structure.
6157		 */
6158		u8 *nvs_ptr = (u8 *)wl->nvs;
6159
6160		oui_addr =
6161			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6162		nic_addr =
6163			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6164	}
6165
6166	/* if the MAC address is zeroed in the NVS derive from fuse */
6167	if (oui_addr == 0 && nic_addr == 0) {
6168		oui_addr = wl->fuse_oui_addr;
6169		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6170		nic_addr = wl->fuse_nic_addr + 1;
6171	}
6172
6173	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6174		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6175		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6176			wl1271_warning("This default nvs file can be removed from the file system");
6177		} else {
6178			wl1271_warning("Your device performance is not optimized.");
6179			wl1271_warning("Please use the calibrator tool to configure your device.");
6180		}
6181
6182		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6183			wl1271_warning("Fuse mac address is zero. using random mac");
6184			/* Use TI oui and a random nic */
6185			oui_addr = WLCORE_TI_OUI_ADDRESS;
6186			nic_addr = get_random_int();
6187		} else {
6188			oui_addr = wl->fuse_oui_addr;
6189			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6190			nic_addr = wl->fuse_nic_addr + 1;
6191		}
6192	}
6193
6194	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6195
6196	ret = ieee80211_register_hw(wl->hw);
6197	if (ret < 0) {
6198		wl1271_error("unable to register mac80211 hw: %d", ret);
6199		goto out;
6200	}
6201
6202	wl->mac80211_registered = true;
6203
6204	wl1271_debugfs_init(wl);
6205
6206	wl1271_notice("loaded");
6207
6208out:
6209	return ret;
6210}
6211
6212static void wl1271_unregister_hw(struct wl1271 *wl)
6213{
6214	if (wl->plt)
6215		wl1271_plt_stop(wl);
6216
6217	ieee80211_unregister_hw(wl->hw);
6218	wl->mac80211_registered = false;
6219
6220}
6221
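/*
 * Advertise the driver's capabilities to mac80211: cipher suites,
 * interface modes, scan/ROC limits, HW flags, queue layout and the
 * per-device copies of the 2.4/5 GHz band tables.
 */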
6222static int wl1271_init_ieee80211(struct wl1271 *wl)
6223{
6224	int i;
6225	static const u32 cipher_suites[] = {
6226		WLAN_CIPHER_SUITE_WEP40,
6227		WLAN_CIPHER_SUITE_WEP104,
6228		WLAN_CIPHER_SUITE_TKIP,
6229		WLAN_CIPHER_SUITE_CCMP,
6230		WL1271_CIPHER_SUITE_GEM,
6231	};
6232
6233	/* The tx descriptor buffer */
6234	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6235
6236	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6237		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6238
6239	/* unit us */
6240	/* FIXME: find a proper value */
6241	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6242
6243	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6244	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6245	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6246	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6247	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6248	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6249	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6250	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6251	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6252	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6253	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6254	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6255	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6256	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6257	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6258
6259	wl->hw->wiphy->cipher_suites = cipher_suites;
6260	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6261
6262	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6263					 BIT(NL80211_IFTYPE_AP) |
6264					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6265					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6266#ifdef CONFIG_MAC80211_MESH
6267					 BIT(NL80211_IFTYPE_MESH_POINT) |
6268#endif
6269					 BIT(NL80211_IFTYPE_P2P_GO);
6270
6271	wl->hw->wiphy->max_scan_ssids = 1;
6272	wl->hw->wiphy->max_sched_scan_ssids = 16;
6273	wl->hw->wiphy->max_match_sets = 16;
6274	/*
6275	 * Maximum length of elements in scanning probe request templates
6276	 * should be the maximum length possible for a template, without
6277	 * the IEEE80211 header of the template
6278	 */
6279	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6280			sizeof(struct ieee80211_header);
6281
6282	wl->hw->wiphy->max_sched_scan_reqs = 1;
6283	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6284		sizeof(struct ieee80211_header);
6285
6286	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6287
6288	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6289				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6290				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6291				WIPHY_FLAG_IBSS_RSN;
6292
6293	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6294
6295	/* make sure all our channels fit in the scanned_ch bitmask */
6296	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6297		     ARRAY_SIZE(wl1271_channels_5ghz) >
6298		     WL1271_MAX_CHANNELS);
6299	/*
6300	 * clear channel flags from the previous usage
6301	 * and restore max_power & max_antenna_gain values.
6302	 */
6303	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6304		wl1271_band_2ghz.channels[i].flags = 0;
6305		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6306		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6307	}
6308
6309	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6310		wl1271_band_5ghz.channels[i].flags = 0;
6311		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6312		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6313	}
6314
6315	/*
6316	 * We keep local copies of the band structs because we need to
6317	 * modify them on a per-device basis.
6318	 */
6319	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6320	       sizeof(wl1271_band_2ghz));
6321	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6322	       &wl->ht_cap[NL80211_BAND_2GHZ],
6323	       sizeof(*wl->ht_cap));
6324	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6325	       sizeof(wl1271_band_5ghz));
6326	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6327	       &wl->ht_cap[NL80211_BAND_5GHZ],
6328	       sizeof(*wl->ht_cap));
6329
6330	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6331		&wl->bands[NL80211_BAND_2GHZ];
6332	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6333		&wl->bands[NL80211_BAND_5GHZ];
6334
6335	/*
6336	 * allow 4 queues per mac address we support +
6337	 * 1 cab queue per mac + one global offchannel Tx queue
6338	 */
6339	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6340
6341	/* the last queue is the offchannel queue */
6342	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6343	wl->hw->max_rates = 1;
6344
6345	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6346
6347	/* the FW answers probe-requests in AP-mode */
6348	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6349	wl->hw->wiphy->probe_resp_offload =
6350		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6351		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6352		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6353
6354	/* allowed interface combinations */
6355	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6356	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6357
6358	/* register vendor commands */
6359	wlcore_set_vendor_commands(wl->hw->wiphy);
6360
6361	SET_IEEE80211_DEV(wl->hw, wl->dev);
6362
6363	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6364	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6365
6366	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6367
6368	return 0;
6369}
6370
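/*
 * Allocate the ieee80211_hw together with the wl1271 private area (plus
 * the lower driver's priv of priv_size), and set up the work items,
 * queues and buffers (aggregation buffer, FW log page, event mailbox).
 */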
6371struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6372				     u32 mbox_size)
6373{
6374	struct ieee80211_hw *hw;
6375	struct wl1271 *wl;
6376	int i, j, ret;
6377	unsigned int order;
6378
6379	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6380	if (!hw) {
6381		wl1271_error("could not alloc ieee80211_hw");
6382		ret = -ENOMEM;
6383		goto err_hw_alloc;
6384	}
6385
6386	wl = hw->priv;
6387	memset(wl, 0, sizeof(*wl));
6388
6389	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6390	if (!wl->priv) {
6391		wl1271_error("could not alloc wl priv");
6392		ret = -ENOMEM;
6393		goto err_priv_alloc;
6394	}
6395
6396	INIT_LIST_HEAD(&wl->wlvif_list);
6397
6398	wl->hw = hw;
6399
6400	/*
6401	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6402	 * we don't allocate any additional resource here, so that's fine.
6403	 */
6404	for (i = 0; i < NUM_TX_QUEUES; i++)
6405		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6406			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6407
6408	skb_queue_head_init(&wl->deferred_rx_queue);
6409	skb_queue_head_init(&wl->deferred_tx_queue);
6410
6411	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6412	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6413	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6414	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6415	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6416	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6417
6418	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6419	if (!wl->freezable_wq) {
6420		ret = -ENOMEM;
6421		goto err_hw;
6422	}
6423
6424	wl->channel = 0;
6425	wl->rx_counter = 0;
6426	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6427	wl->band = NL80211_BAND_2GHZ;
6428	wl->channel_type = NL80211_CHAN_NO_HT;
6429	wl->flags = 0;
6430	wl->sg_enabled = true;
6431	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6432	wl->recovery_count = 0;
6433	wl->hw_pg_ver = -1;
6434	wl->ap_ps_map = 0;
6435	wl->ap_fw_ps_map = 0;
6436	wl->quirks = 0;
6437	wl->system_hlid = WL12XX_SYSTEM_HLID;
6438	wl->active_sta_count = 0;
6439	wl->active_link_count = 0;
6440	wl->fwlog_size = 0;
6441
6442	/* The system link is always allocated */
6443	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6444
6445	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6446	for (i = 0; i < wl->num_tx_desc; i++)
6447		wl->tx_frames[i] = NULL;
6448
6449	spin_lock_init(&wl->wl_lock);
6450
6451	wl->state = WLCORE_STATE_OFF;
6452	wl->fw_type = WL12XX_FW_TYPE_NONE;
6453	mutex_init(&wl->mutex);
6454	mutex_init(&wl->flush_mutex);
6455	init_completion(&wl->nvs_loading_complete);
6456
6457	order = get_order(aggr_buf_size);
6458	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6459	if (!wl->aggr_buf) {
6460		ret = -ENOMEM;
6461		goto err_wq;
6462	}
6463	wl->aggr_buf_size = aggr_buf_size;
6464
6465	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6466	if (!wl->dummy_packet) {
6467		ret = -ENOMEM;
6468		goto err_aggr;
6469	}
6470
6471	/* Allocate one page for the FW log */
6472	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6473	if (!wl->fwlog) {
6474		ret = -ENOMEM;
6475		goto err_dummy_packet;
6476	}
6477
6478	wl->mbox_size = mbox_size;
6479	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6480	if (!wl->mbox) {
6481		ret = -ENOMEM;
6482		goto err_fwlog;
6483	}
6484
6485	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6486	if (!wl->buffer_32) {
6487		ret = -ENOMEM;
6488		goto err_mbox;
6489	}
6490
6491	return hw;
6492
6493err_mbox:
6494	kfree(wl->mbox);
6495
6496err_fwlog:
6497	free_page((unsigned long)wl->fwlog);
6498
6499err_dummy_packet:
6500	dev_kfree_skb(wl->dummy_packet);
6501
6502err_aggr:
6503	free_pages((unsigned long)wl->aggr_buf, order);
6504
6505err_wq:
6506	destroy_workqueue(wl->freezable_wq);
6507
6508err_hw:
6509	wl1271_debugfs_exit(wl);
6510	kfree(wl->priv);
6511
6512err_priv_alloc:
6513	ieee80211_free_hw(hw);
6514
6515err_hw_alloc:
6516
6517	return ERR_PTR(ret);
6518}
6519EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6520
6521int wlcore_free_hw(struct wl1271 *wl)
6522{
6523	/* Unblock any fwlog readers */
6524	mutex_lock(&wl->mutex);
6525	wl->fwlog_size = -1;
6526	mutex_unlock(&wl->mutex);
6527
6528	wlcore_sysfs_free(wl);
6529
6530	kfree(wl->buffer_32);
6531	kfree(wl->mbox);
6532	free_page((unsigned long)wl->fwlog);
6533	dev_kfree_skb(wl->dummy_packet);
6534	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6535
6536	wl1271_debugfs_exit(wl);
6537
6538	vfree(wl->fw);
6539	wl->fw = NULL;
6540	wl->fw_type = WL12XX_FW_TYPE_NONE;
6541	kfree(wl->nvs);
6542	wl->nvs = NULL;
6543
6544	kfree(wl->raw_fw_status);
6545	kfree(wl->fw_status);
6546	kfree(wl->tx_res_if);
6547	destroy_workqueue(wl->freezable_wq);
6548
6549	kfree(wl->priv);
6550	ieee80211_free_hw(wl->hw);
6551
6552	return 0;
6553}
6554EXPORT_SYMBOL_GPL(wlcore_free_hw);
6555
6556#ifdef CONFIG_PM
6557static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6558	.flags = WIPHY_WOWLAN_ANY,
6559	.n_patterns = WL1271_MAX_RX_FILTERS,
6560	.pattern_min_len = 1,
6561	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6562};
6563#endif
6564
6565static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6566{
6567	return IRQ_WAKE_THREAD;
6568}
6569
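/*
 * Completion callback for the asynchronous NVS firmware request issued
 * from wlcore_probe(). Continues the probe sequence: chip setup and
 * power-on, HW info read, IRQ and wake-IRQ setup, mac80211 registration
 * and sysfs initialization.
 */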
6570static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6571{
6572	struct wl1271 *wl = context;
6573	struct platform_device *pdev = wl->pdev;
6574	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6575	struct resource *res;
6576
6577	int ret;
6578	irq_handler_t hardirq_fn = NULL;
6579
6580	if (fw) {
6581		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6582		if (!wl->nvs) {
6583			wl1271_error("Could not allocate nvs data");
6584			goto out;
6585		}
6586		wl->nvs_len = fw->size;
6587	} else if (pdev_data->family->nvs_name) {
6588		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6589			     pdev_data->family->nvs_name);
6590		wl->nvs = NULL;
6591		wl->nvs_len = 0;
6592	} else {
6593		wl->nvs = NULL;
6594		wl->nvs_len = 0;
6595	}
6596
6597	ret = wl->ops->setup(wl);
6598	if (ret < 0)
6599		goto out_free_nvs;
6600
6601	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6602
6603	/* adjust some runtime configuration parameters */
6604	wlcore_adjust_conf(wl);
6605
6606	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6607	if (!res) {
6608		wl1271_error("Could not get IRQ resource");
6609		goto out_free_nvs;
6610	}
6611
6612	wl->irq = res->start;
6613	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6614	wl->if_ops = pdev_data->if_ops;
6615
6616	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6617		hardirq_fn = wlcore_hardirq;
6618	else
6619		wl->irq_flags |= IRQF_ONESHOT;
6620
6621	ret = wl12xx_set_power_on(wl);
6622	if (ret < 0)
6623		goto out_free_nvs;
6624
6625	ret = wl12xx_get_hw_info(wl);
6626	if (ret < 0) {
6627		wl1271_error("couldn't get hw info");
6628		wl1271_power_off(wl);
6629		goto out_free_nvs;
6630	}
6631
6632	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6633				   wl->irq_flags, pdev->name, wl);
6634	if (ret < 0) {
6635		wl1271_error("interrupt configuration failed");
6636		wl1271_power_off(wl);
6637		goto out_free_nvs;
6638	}
6639
6640#ifdef CONFIG_PM
6641	device_init_wakeup(wl->dev, true);
6642
6643	ret = enable_irq_wake(wl->irq);
6644	if (!ret) {
6645		wl->irq_wake_enabled = true;
6646		if (pdev_data->pwr_in_suspend)
6647			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6648	}
6649
6650	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6651	if (res) {
6652		wl->wakeirq = res->start;
6653		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6654		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6655		if (ret)
6656			wl->wakeirq = -ENODEV;
6657	} else {
6658		wl->wakeirq = -ENODEV;
6659	}
6660#endif
6661	disable_irq(wl->irq);
6662	wl1271_power_off(wl);
6663
6664	ret = wl->ops->identify_chip(wl);
6665	if (ret < 0)
6666		goto out_irq;
6667
6668	ret = wl1271_init_ieee80211(wl);
6669	if (ret)
6670		goto out_irq;
6671
6672	ret = wl1271_register_hw(wl);
6673	if (ret)
6674		goto out_irq;
6675
6676	ret = wlcore_sysfs_init(wl);
6677	if (ret)
6678		goto out_unreg;
6679
6680	wl->initialized = true;
6681	goto out;
6682
6683out_unreg:
6684	wl1271_unregister_hw(wl);
6685
6686out_irq:
6687	if (wl->wakeirq >= 0)
6688		dev_pm_clear_wake_irq(wl->dev);
6689	device_init_wakeup(wl->dev, false);
6690	free_irq(wl->irq, wl);
6691
6692out_free_nvs:
6693	kfree(wl->nvs);
6694
6695out:
6696	release_firmware(fw);
6697	complete_all(&wl->nvs_loading_complete);
6698}
6699
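/*
 * Runtime PM hooks: on idle, put the chip into ELP (unless in PLT mode or
 * a vif is still active outside PS); on resume, wake it up and wait for
 * the wakeup completion, queuing recovery work if the wakeup times out.
 */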
6700static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6701{
6702	struct wl1271 *wl = dev_get_drvdata(dev);
6703	struct wl12xx_vif *wlvif;
6704	int error;
6705
6706	/* We do not enter elp sleep in PLT mode */
6707	if (wl->plt)
6708		return 0;
6709
6710	/* Nothing to do if no ELP mode requested */
6711	if (wl->sleep_auth != WL1271_PSM_ELP)
6712		return 0;
6713
6714	wl12xx_for_each_wlvif(wl, wlvif) {
6715		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6716		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6717			return -EBUSY;
6718	}
6719
6720	wl1271_debug(DEBUG_PSM, "chip to elp");
6721	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6722	if (error < 0) {
6723		wl12xx_queue_recovery_work(wl);
6724
6725		return error;
6726	}
6727
6728	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6729
6730	return 0;
6731}
6732
6733static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6734{
6735	struct wl1271 *wl = dev_get_drvdata(dev);
6736	DECLARE_COMPLETION_ONSTACK(compl);
6737	unsigned long flags;
6738	int ret;
6739	unsigned long start_time = jiffies;
6740	bool recovery = false;
6741
6742	/* Nothing to do if no ELP mode requested */
6743	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6744		return 0;
6745
6746	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6747
6748	spin_lock_irqsave(&wl->wl_lock, flags);
6749	wl->elp_compl = &compl;
6750	spin_unlock_irqrestore(&wl->wl_lock, flags);
6751
6752	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6753	if (ret < 0) {
6754		recovery = true;
6755	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6756		ret = wait_for_completion_timeout(&compl,
6757			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6758		if (ret == 0) {
6759			wl1271_warning("ELP wakeup timeout!");
6760			recovery = true;
6761		}
6762	}
6763
6764	spin_lock_irqsave(&wl->wl_lock, flags);
6765	wl->elp_compl = NULL;
6766	spin_unlock_irqrestore(&wl->wl_lock, flags);
6767	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6768
6769	if (recovery) {
6770		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6771		wl12xx_queue_recovery_work(wl);
6772	} else {
6773		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6774			     jiffies_to_msecs(jiffies - start_time));
6775	}
6776
6777	return 0;
6778}
6779
6780static const struct dev_pm_ops wlcore_pm_ops = {
6781	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6782			   wlcore_runtime_resume,
6783			   NULL)
6784};
6785
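/*
 * Probe entry point shared by the wl12xx and wl18xx drivers.  When the chip
 * family defines an NVS calibration file it is requested asynchronously and
 * initialization continues in wlcore_nvs_cb().
 */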
6786int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6787{
6788	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6789	const char *nvs_name;
6790	int ret = 0;
6791
6792	if (!wl->ops || !wl->ptable || !pdev_data)
6793		return -EINVAL;
6794
6795	wl->dev = &pdev->dev;
6796	wl->pdev = pdev;
6797	platform_set_drvdata(pdev, wl);
6798
6799	if (pdev_data->family && pdev_data->family->nvs_name) {
6800		nvs_name = pdev_data->family->nvs_name;
6801		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6802					      nvs_name, &pdev->dev, GFP_KERNEL,
6803					      wl, wlcore_nvs_cb);
6804		if (ret < 0) {
6805			wl1271_error("request_firmware_nowait failed for %s: %d",
6806				     nvs_name, ret);
6807			complete_all(&wl->nvs_loading_complete);
6808		}
6809	} else {
6810		wlcore_nvs_cb(NULL, wl);
6811	}
6812
6813	wl->dev->driver->pm = &wlcore_pm_ops;
6814	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6815	pm_runtime_use_autosuspend(wl->dev);
6816	pm_runtime_enable(wl->dev);
6817
6818	return ret;
6819}
6820EXPORT_SYMBOL_GPL(wlcore_probe);
6821
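/*
 * Remove path: wait for a pending NVS load to finish, release the wakeup and
 * interrupt resources, unregister from mac80211 and free the hardware state.
 */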
6822int wlcore_remove(struct platform_device *pdev)
6823{
6824	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6825	struct wl1271 *wl = platform_get_drvdata(pdev);
6826	int error;
6827
6828	error = pm_runtime_get_sync(wl->dev);
6829	if (error < 0)
6830		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6831
6832	wl->dev->driver->pm = NULL;
6833
6834	if (pdev_data->family && pdev_data->family->nvs_name)
6835		wait_for_completion(&wl->nvs_loading_complete);
6836	if (!wl->initialized)
6837		return 0;
6838
6839	if (wl->wakeirq >= 0) {
6840		dev_pm_clear_wake_irq(wl->dev);
6841		wl->wakeirq = -ENODEV;
6842	}
6843
6844	device_init_wakeup(wl->dev, false);
6845
6846	if (wl->irq_wake_enabled)
6847		disable_irq_wake(wl->irq);
6848
6849	wl1271_unregister_hw(wl);
6850
6851	pm_runtime_put_sync(wl->dev);
6852	pm_runtime_dont_use_autosuspend(wl->dev);
6853	pm_runtime_disable(wl->dev);
6854
6855	free_irq(wl->irq, wl);
6856	wlcore_free_hw(wl);
6857
6858	return 0;
6859}
6860EXPORT_SYMBOL_GPL(wlcore_remove);
6861
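/* Module parameters: debug level, FW logger configuration and recovery behaviour. */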
6862u32 wl12xx_debug_level = DEBUG_NONE;
6863EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6864module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6865MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6866
6867module_param_named(fwlog, fwlog_param, charp, 0);
6868MODULE_PARM_DESC(fwlog,
6869		 "FW logger options: continuous, dbgpins or disable");
6870
6871module_param(fwlog_mem_blocks, int, 0600);
6872	MODULE_PARM_DESC(fwlog_mem_blocks, "Number of memory blocks reserved for the FW logger");
6873
6874module_param(bug_on_recovery, int, 0600);
6875MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6876
6877module_param(no_recovery, int, 0600);
6878MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6879
6880MODULE_LICENSE("GPL");
6881MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6882MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");