   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/*
   3 * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
   4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
   5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
   6 */
   7#include <linux/etherdevice.h>
   8#include <net/mac80211.h>
   9#include <linux/crc32.h>
  10
  11#include "mvm.h"
  12#include "fw/api/scan.h"
  13#include "iwl-io.h"
  14
  15#define IWL_DENSE_EBS_SCAN_RATIO 5
  16#define IWL_SPARSE_EBS_SCAN_RATIO 1
  17
  18#define IWL_SCAN_DWELL_ACTIVE		10
  19#define IWL_SCAN_DWELL_PASSIVE		110
  20#define IWL_SCAN_DWELL_FRAGMENTED	44
  21#define IWL_SCAN_DWELL_EXTENDED		90
  22#define IWL_SCAN_NUM_OF_FRAGS		3
  23
  24/* adaptive dwell max budget time [TU] for full scan */
  25#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
  26/* adaptive dwell max budget time [TU] for directed scan */
  27#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
  28/* adaptive dwell default high band APs number */
  29#define IWL_SCAN_ADWELL_DEFAULT_HB_N_APS 8
  30/* adaptive dwell default low band APs number */
  31#define IWL_SCAN_ADWELL_DEFAULT_LB_N_APS 2
  32/* adaptive dwell default APs number in social channels (1, 6, 11) */
  33#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
  34/* number of scan channels */
  35#define IWL_SCAN_NUM_CHANNELS 112
  36/* adaptive dwell number of APs override mask for p2p friendly GO */
  37#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT BIT(20)
  38/* adaptive dwell number of APs override mask for social channels */
  39#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT BIT(21)
  40/* adaptive dwell number of APs override for p2p friendly GO channels */
  41#define IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
  42/* adaptive dwell number of APs override for social channels */
  43#define IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
  44
  45/* minimal number of 2GHz and 5GHz channels in the regular scan request */
  46#define IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS 4
  47
  48/* Number of iterations on the channel for mei filtered scan */
  49#define IWL_MEI_SCAN_NUM_ITER	5U
  50
  51struct iwl_mvm_scan_timing_params {
  52	u32 suspend_time;
  53	u32 max_out_time;
  54};
  55
  56static struct iwl_mvm_scan_timing_params scan_timing[] = {
  57	[IWL_SCAN_TYPE_UNASSOC] = {
  58		.suspend_time = 0,
  59		.max_out_time = 0,
  60	},
  61	[IWL_SCAN_TYPE_WILD] = {
  62		.suspend_time = 30,
  63		.max_out_time = 120,
  64	},
  65	[IWL_SCAN_TYPE_MILD] = {
  66		.suspend_time = 120,
  67		.max_out_time = 120,
  68	},
  69	[IWL_SCAN_TYPE_FRAGMENTED] = {
  70		.suspend_time = 95,
  71		.max_out_time = 44,
  72	},
  73	[IWL_SCAN_TYPE_FAST_BALANCE] = {
  74		.suspend_time = 30,
  75		.max_out_time = 37,
  76	},
  77};
  78
  79struct iwl_mvm_scan_params {
   80	/* For CDB this is the low band scan type; for non-CDB it is the only scan type. */
  81	enum iwl_mvm_scan_type type;
  82	enum iwl_mvm_scan_type hb_type;
  83	u32 n_channels;
  84	u16 delay;
  85	int n_ssids;
  86	struct cfg80211_ssid *ssids;
  87	struct ieee80211_channel **channels;
  88	u32 flags;
  89	u8 *mac_addr;
  90	u8 *mac_addr_mask;
  91	bool no_cck;
  92	bool pass_all;
  93	int n_match_sets;
  94	struct iwl_scan_probe_req preq;
  95	struct cfg80211_match_set *match_sets;
  96	int n_scan_plans;
  97	struct cfg80211_sched_scan_plan *scan_plans;
  98	bool iter_notif;
  99	struct cfg80211_scan_6ghz_params *scan_6ghz_params;
 100	u32 n_6ghz_params;
 101	bool scan_6ghz;
 102	bool enable_6ghz_passive;
 103	bool respect_p2p_go, respect_p2p_go_hb;
 104	s8 tsf_report_link_id;
 105	u8 bssid[ETH_ALEN] __aligned(2);
 106};
 107
 108static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
 109{
 110	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
 111
 112	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
 113		return (void *)&cmd->v8.data;
 114
 115	if (iwl_mvm_is_adaptive_dwell_supported(mvm))
 116		return (void *)&cmd->v7.data;
 117
 118	if (iwl_mvm_cdb_scan_api(mvm))
 119		return (void *)&cmd->v6.data;
 120
 121	return (void *)&cmd->v1.data;
 122}
 123
 124static inline struct iwl_scan_umac_chan_param *
 125iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
 126{
 127	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
 128
 129	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
 130		return &cmd->v8.channel;
 131
 132	if (iwl_mvm_is_adaptive_dwell_supported(mvm))
 133		return &cmd->v7.channel;
 134
 135	if (iwl_mvm_cdb_scan_api(mvm))
 136		return &cmd->v6.channel;
 137
 138	return &cmd->v1.channel;
 139}
 140
 141static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
 142{
 143	if (mvm->scan_rx_ant != ANT_NONE)
 144		return mvm->scan_rx_ant;
 145	return iwl_mvm_get_valid_rx_ant(mvm);
 146}
 147
 148static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
 149{
 150	u16 rx_chain;
 151	u8 rx_ant;
 152
 153	rx_ant = iwl_mvm_scan_rx_ant(mvm);
 154	rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
 155	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
 156	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
 157	rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
 158	return cpu_to_le16(rx_chain);
 159}
 160
 161static inline __le32
 162iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
 163			  bool no_cck)
 164{
 165	u32 tx_ant;
 166
 167	iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx);
 168	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
 169
 170	if (band == NL80211_BAND_2GHZ && !no_cck)
 171		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK_V1 |
 172				   tx_ant);
 173	else
 174		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 175}
 176
 177static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
 178{
 179	return mvm->tcm.result.global_load;
 180}
 181
 182static enum iwl_mvm_traffic_load
 183iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
 184{
 185	return mvm->tcm.result.band_load[band];
 186}
 187
 188struct iwl_mvm_scan_iter_data {
 189	u32 global_cnt;
 190	struct ieee80211_vif *current_vif;
 191	bool is_dcm_with_p2p_go;
 192};
 193
 194static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
 195				  struct ieee80211_vif *vif)
 196{
 197	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 198	struct iwl_mvm_scan_iter_data *data = _data;
 199	struct iwl_mvm_vif *curr_mvmvif;
 200
 201	if (vif->type != NL80211_IFTYPE_P2P_DEVICE &&
 202	    mvmvif->deflink.phy_ctxt &&
 203	    mvmvif->deflink.phy_ctxt->id < NUM_PHY_CTX)
 204		data->global_cnt += 1;
 205
 206	if (!data->current_vif || vif == data->current_vif)
 207		return;
 208
 209	curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
 210
 211	if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
 212	    mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt &&
 213	    mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
 214		data->is_dcm_with_p2p_go = true;
 215}
 216
 217static enum
 218iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
 219					 struct ieee80211_vif *vif,
 220					 enum iwl_mvm_traffic_load load,
 221					 bool low_latency)
 222{
 223	struct iwl_mvm_scan_iter_data data = {
 224		.current_vif = vif,
 225		.is_dcm_with_p2p_go = false,
 226		.global_cnt = 0,
 227	};
 228
 229	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
 230						   IEEE80211_IFACE_ITER_NORMAL,
 231						   iwl_mvm_scan_iterator,
 232						   &data);
 233
 234	if (!data.global_cnt)
 235		return IWL_SCAN_TYPE_UNASSOC;
 236
 237	if (fw_has_api(&mvm->fw->ucode_capa,
 238		       IWL_UCODE_TLV_API_FRAGMENTED_SCAN)) {
 239		if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) &&
 240		    (!vif || vif->type != NL80211_IFTYPE_P2P_DEVICE))
 241			return IWL_SCAN_TYPE_FRAGMENTED;
 242
 243		/*
 244		 * in case of DCM with GO where BSS DTIM interval < 220msec
 245		 * set all scan requests as fast-balance scan
 246		 */
 247		if (vif && vif->type == NL80211_IFTYPE_STATION &&
 248		    data.is_dcm_with_p2p_go &&
 249		    ((vif->bss_conf.beacon_int *
 250		      vif->bss_conf.dtim_period) < 220))
 251			return IWL_SCAN_TYPE_FAST_BALANCE;
 252	}
 253
 254	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
 255		return IWL_SCAN_TYPE_MILD;
 256
 257	return IWL_SCAN_TYPE_WILD;
 258}
 259
 260static enum
 261iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
 262					struct ieee80211_vif *vif)
 263{
 264	enum iwl_mvm_traffic_load load;
 265	bool low_latency;
 266
 267	load = iwl_mvm_get_traffic_load(mvm);
 268	low_latency = iwl_mvm_low_latency(mvm);
 269
 270	return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
 271}
 272
 273static enum
 274iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
 275					     struct ieee80211_vif *vif,
 276					     enum nl80211_band band)
 277{
 278	enum iwl_mvm_traffic_load load;
 279	bool low_latency;
 280
 281	load = iwl_mvm_get_traffic_load_band(mvm, band);
 282	low_latency = iwl_mvm_low_latency_band(mvm, band);
 283
 284	return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
 285}
 286
 287static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
 288{
 289	/* require rrm scan whenever the fw supports it */
 290	return fw_has_capa(&mvm->fw->ucode_capa,
 291			   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
 292}
 293
 294static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
 295{
 296	int max_probe_len;
 297
 298	max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
 299
 300	/* we create the 802.11 header and SSID element */
 301	max_probe_len -= 24 + 2;
 302
 303	/* DS parameter set element is added on 2.4GHZ band if required */
 304	if (iwl_mvm_rrm_scan_needed(mvm))
 305		max_probe_len -= 3;
 306
 307	return max_probe_len;
 308}
 309
 310int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
 311{
 312	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
 313
 314	/* TODO: [BUG] This function should return the maximum allowed size of
 315	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
 316	 * in the same command. So the correct implementation of this function
 317	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
 318	 * command has only 512 bytes and it would leave us with about 240
 319	 * bytes for scan IEs, which is clearly not enough. So meanwhile
 320	 * we will report an incorrect value. This may result in a failure to
 321	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
  322	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
 323	 */
 324	return max_ie_len;
 325}
 326
 327void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
 328					      struct iwl_rx_cmd_buffer *rxb)
 329{
 330	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 331	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
 332
 333	IWL_DEBUG_SCAN(mvm,
 334		       "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
 335		       notif->status, notif->scanned_channels);
 336
 337	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
 338		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
 339		ieee80211_sched_scan_results(mvm->hw);
 340		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
 341	}
 342}
 343
 344void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
 345				 struct iwl_rx_cmd_buffer *rxb)
 346{
 347	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
 348	ieee80211_sched_scan_results(mvm->hw);
 349}
 350
 351static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
 352{
 353	switch (status) {
 354	case IWL_SCAN_EBS_SUCCESS:
 355		return "successful";
 356	case IWL_SCAN_EBS_INACTIVE:
 357		return "inactive";
 358	case IWL_SCAN_EBS_FAILED:
 359	case IWL_SCAN_EBS_CHAN_NOT_FOUND:
 360	default:
 361		return "failed";
 362	}
 363}
 364
 365void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
 366					 struct iwl_rx_cmd_buffer *rxb)
 367{
 368	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 369	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
 370	bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 371
 372	/* If this happens, the firmware has mistakenly sent an LMAC
 373	 * notification during UMAC scans -- warn and ignore it.
 374	 */
 375	if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
 376				     IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
 377		return;
 378
 379	/* scan status must be locked for proper checking */
 380	lockdep_assert_held(&mvm->mutex);
 381
 382	/* We first check if we were stopping a scan, in which case we
 383	 * just clear the stopping flag.  Then we check if it was a
 384	 * firmware initiated stop, in which case we need to inform
 385	 * mac80211.
 386	 * Note that we can have a stopping and a running scan
 387	 * simultaneously, but we can't have two different types of
 388	 * scans stopping or running at the same time (since LMAC
 389	 * doesn't support it).
 390	 */
 391
 392	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
 393		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
 394
 395		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 396			       aborted ? "aborted" : "completed",
 397			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 398		IWL_DEBUG_SCAN(mvm,
 399			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
 400			       scan_notif->last_schedule_line,
 401			       scan_notif->last_schedule_iteration,
 402			       __le32_to_cpu(scan_notif->time_after_last_iter));
 403
 404		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
 405	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
 406		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
 407			       aborted ? "aborted" : "completed",
 408			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 409
 410		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
 411	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
 412		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 413
 414		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 415			       aborted ? "aborted" : "completed",
 416			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 417		IWL_DEBUG_SCAN(mvm,
 418			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
 419			       scan_notif->last_schedule_line,
 420			       scan_notif->last_schedule_iteration,
 421			       __le32_to_cpu(scan_notif->time_after_last_iter));
 422
 423		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 424		ieee80211_sched_scan_stopped(mvm->hw);
 425		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 426	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
 427		struct cfg80211_scan_info info = {
 428			.aborted = aborted,
 429		};
 430
 431		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
 432			       aborted ? "aborted" : "completed",
 433			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 434
 435		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
 436		ieee80211_scan_completed(mvm->hw, &info);
 437		cancel_delayed_work(&mvm->scan_timeout_dwork);
 438		iwl_mvm_resume_tcm(mvm);
 439	} else {
 440		IWL_ERR(mvm,
 441			"got scan complete notification but no scan is running\n");
 442	}
 443
 444	mvm->last_ebs_successful =
 445			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
 446			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
 447}
 448
 449static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
 450{
 451	int i;
 452
 453	for (i = 0; i < PROBE_OPTION_MAX; i++) {
 454		if (!ssid_list[i].len)
 455			break;
 456		if (ssid_list[i].len == ssid_len &&
 457		    !memcmp(ssid_list->ssid, ssid, ssid_len))
 458			return i;
 459	}
 460	return -1;
 461}
 462
 463/* We insert the SSIDs in an inverted order, because the FW will
 464 * invert it back.
 465 */
 466static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
 467				 struct iwl_ssid_ie *ssids,
 468				 u32 *ssid_bitmap)
 469{
 470	int i, j;
 471	int index;
 472	u32 tmp_bitmap = 0;
 473
 474	/*
 475	 * copy SSIDs from match list.
  476	 * iwl_mvm_config_sched_scan_profiles() uses the order of these ssids to
 477	 * config match list.
 478	 */
 479	for (i = 0, j = params->n_match_sets - 1;
 480	     j >= 0 && i < PROBE_OPTION_MAX;
 481	     i++, j--) {
 482		/* skip empty SSID matchsets */
 483		if (!params->match_sets[j].ssid.ssid_len)
 484			continue;
 485		ssids[i].id = WLAN_EID_SSID;
 486		ssids[i].len = params->match_sets[j].ssid.ssid_len;
 487		memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
 488		       ssids[i].len);
 489	}
 490
 491	/* add SSIDs from scan SSID list */
 492	for (j = params->n_ssids - 1;
 493	     j >= 0 && i < PROBE_OPTION_MAX;
 494	     i++, j--) {
 495		index = iwl_ssid_exist(params->ssids[j].ssid,
 496				       params->ssids[j].ssid_len,
 497				       ssids);
 498		if (index < 0) {
 499			ssids[i].id = WLAN_EID_SSID;
 500			ssids[i].len = params->ssids[j].ssid_len;
 501			memcpy(ssids[i].ssid, params->ssids[j].ssid,
 502			       ssids[i].len);
 503			tmp_bitmap |= BIT(i);
 504		} else {
 505			tmp_bitmap |= BIT(index);
 506		}
 507	}
 508	if (ssid_bitmap)
 509		*ssid_bitmap = tmp_bitmap;
 510}
 511
 512static int
 513iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
 514				   struct cfg80211_sched_scan_request *req)
 515{
 516	struct iwl_scan_offload_profile *profile;
 517	struct iwl_scan_offload_profile_cfg_v1 *profile_cfg_v1;
 518	struct iwl_scan_offload_blocklist *blocklist;
 519	struct iwl_scan_offload_profile_cfg_data *data;
 520	int max_profiles = iwl_umac_scan_get_max_profiles(mvm->fw);
 521	int profile_cfg_size = sizeof(*data) +
 522		sizeof(*profile) * max_profiles;
 523	struct iwl_host_cmd cmd = {
 524		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
 525		.len[1] = profile_cfg_size,
 526		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
 527		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
 528	};
 529	int blocklist_len;
 530	int i;
 531	int ret;
 532
 533	if (WARN_ON(req->n_match_sets > max_profiles))
 534		return -EIO;
 535
 536	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
 537		blocklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
 538	else
 539		blocklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
 540
 541	blocklist = kcalloc(blocklist_len, sizeof(*blocklist), GFP_KERNEL);
 542	if (!blocklist)
 543		return -ENOMEM;
 544
 545	profile_cfg_v1 = kzalloc(profile_cfg_size, GFP_KERNEL);
 546	if (!profile_cfg_v1) {
 547		ret = -ENOMEM;
 548		goto free_blocklist;
 549	}
 550
 551	cmd.data[0] = blocklist;
 552	cmd.len[0] = sizeof(*blocklist) * blocklist_len;
 553	cmd.data[1] = profile_cfg_v1;
 554
  555	/* if max_profiles is MAX_PROFILES_V2, we have the new API */
 556	if (max_profiles == IWL_SCAN_MAX_PROFILES_V2) {
 557		struct iwl_scan_offload_profile_cfg *profile_cfg =
 558			(struct iwl_scan_offload_profile_cfg *)profile_cfg_v1;
 559
 560		data = &profile_cfg->data;
 561	} else {
 562		data = &profile_cfg_v1->data;
 563	}
 564
 565	/* No blocklist configuration */
 566	data->num_profiles = req->n_match_sets;
 567	data->active_clients = SCAN_CLIENT_SCHED_SCAN;
 568	data->pass_match = SCAN_CLIENT_SCHED_SCAN;
 569	data->match_notify = SCAN_CLIENT_SCHED_SCAN;
 570
 571	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
 572		data->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
 573
 574	for (i = 0; i < req->n_match_sets; i++) {
 575		profile = &profile_cfg_v1->profiles[i];
 576		profile->ssid_index = i;
 577		/* Support any cipher and auth algorithm */
 578		profile->unicast_cipher = 0xff;
 579		profile->auth_alg = IWL_AUTH_ALGO_UNSUPPORTED |
 580			IWL_AUTH_ALGO_NONE | IWL_AUTH_ALGO_PSK | IWL_AUTH_ALGO_8021X |
 581			IWL_AUTH_ALGO_SAE | IWL_AUTH_ALGO_8021X_SHA384 | IWL_AUTH_ALGO_OWE;
 582		profile->network_type = IWL_NETWORK_TYPE_ANY;
 583		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
 584		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
 585	}
 586
 587	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
 588
 589	ret = iwl_mvm_send_cmd(mvm, &cmd);
 590	kfree(profile_cfg_v1);
 591free_blocklist:
 592	kfree(blocklist);
 593
 594	return ret;
 595}
 596
 597static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
 598				  struct cfg80211_sched_scan_request *req)
 599{
 600	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
 601		IWL_DEBUG_SCAN(mvm,
 602			       "Sending scheduled scan with filtering, n_match_sets %d\n",
 603			       req->n_match_sets);
 604		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 605		return false;
 606	}
 607
 608	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
 609
 610	mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
 611	return true;
 612}
 613
 614static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
 615{
 616	int ret;
 617	struct iwl_host_cmd cmd = {
 618		.id = SCAN_OFFLOAD_ABORT_CMD,
 619	};
 620	u32 status = CAN_ABORT_STATUS;
 621
 622	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
 623	if (ret)
 624		return ret;
 625
 626	if (status != CAN_ABORT_STATUS) {
 627		/*
 628		 * The scan abort will return 1 for success or
 629		 * 2 for "failure".  A failure condition can be
 630		 * due to simply not being in an active scan which
 631		 * can occur if we send the scan abort before the
 632		 * microcode has notified us that a scan is completed.
 633		 */
 634		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
 635		ret = -ENOENT;
 636	}
 637
 638	return ret;
 639}
 640
 641static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
 642				     struct iwl_scan_req_tx_cmd *tx_cmd,
 643				     bool no_cck)
 644{
 645	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
 646					 TX_CMD_FLG_BT_DIS);
 647	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
 648							   NL80211_BAND_2GHZ,
 649							   no_cck);
 650
 651	if (!iwl_mvm_has_new_station_api(mvm->fw)) {
 652		tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
 653		tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
 654
 655	/*
 656	 * Fw doesn't use this sta anymore, pending deprecation via HOST API
 657	 * change
 658	 */
 659	} else {
 660		tx_cmd[0].sta_id = 0xff;
 661		tx_cmd[1].sta_id = 0xff;
 662	}
 663
 664	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
 665					 TX_CMD_FLG_BT_DIS);
 666
 667	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
 668							   NL80211_BAND_5GHZ,
 669							   no_cck);
 670}
 671
 672static void
 673iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
 674			       struct ieee80211_channel **channels,
 675			       int n_channels, u32 ssid_bitmap,
 676			       struct iwl_scan_req_lmac *cmd)
 677{
 678	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
 679	int i;
 680
 681	for (i = 0; i < n_channels; i++) {
 682		channel_cfg[i].channel_num =
 683			cpu_to_le16(channels[i]->hw_value);
 684		channel_cfg[i].iter_count = cpu_to_le16(1);
 685		channel_cfg[i].iter_interval = 0;
 686		channel_cfg[i].flags =
 687			cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
 688				    ssid_bitmap);
 689	}
 690}
 691
 692static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
 693					   size_t len, u8 *const pos)
 694{
 695	static const u8 before_ds_params[] = {
 696			WLAN_EID_SSID,
 697			WLAN_EID_SUPP_RATES,
 698			WLAN_EID_REQUEST,
 699			WLAN_EID_EXT_SUPP_RATES,
 700	};
 701	size_t offs;
 702	u8 *newpos = pos;
 703
 704	if (!iwl_mvm_rrm_scan_needed(mvm)) {
 705		memcpy(newpos, ies, len);
 706		return newpos + len;
 707	}
 708
 709	offs = ieee80211_ie_split(ies, len,
 710				  before_ds_params,
 711				  ARRAY_SIZE(before_ds_params),
 712				  0);
 713
 714	memcpy(newpos, ies, offs);
 715	newpos += offs;
 716
 717	/* Add a placeholder for DS Parameter Set element */
 718	*newpos++ = WLAN_EID_DS_PARAMS;
 719	*newpos++ = 1;
 720	*newpos++ = 0;
 721
 722	memcpy(newpos, ies + offs, len - offs);
 723	newpos += len - offs;
 724
 725	return newpos;
 726}
 727
 728#define WFA_TPC_IE_LEN	9
 729
 730static void iwl_mvm_add_tpc_report_ie(u8 *pos)
 731{
 732	pos[0] = WLAN_EID_VENDOR_SPECIFIC;
 733	pos[1] = WFA_TPC_IE_LEN - 2;
 734	pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
 735	pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
 736	pos[4] = WLAN_OUI_MICROSOFT & 0xff;
 737	pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
 738	pos[6] = 0;
 739	/* pos[7] - tx power will be inserted by the FW */
 740	pos[7] = 0;
 741	pos[8] = 0;
 742}
 743
 744static void
 745iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 746			 struct ieee80211_scan_ies *ies,
 747			 struct iwl_mvm_scan_params *params)
 748{
 749	struct ieee80211_mgmt *frame = (void *)params->preq.buf;
 750	u8 *pos, *newpos;
 751	const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
 752		params->mac_addr : NULL;
 753
 754	/*
 755	 * Unfortunately, right now the offload scan doesn't support randomising
 756	 * within the firmware, so until the firmware API is ready we implement
 757	 * it in the driver. This means that the scan iterations won't really be
  758	 * it in the driver. This means that the scan iterations won't really be
  759	 * random (the address only changes when the scan is restarted), but at least that helps a bit.
 760	if (mac_addr)
 761		get_random_mask_addr(frame->sa, mac_addr,
 762				     params->mac_addr_mask);
 763	else
 764		memcpy(frame->sa, vif->addr, ETH_ALEN);
 765
 766	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
 767	eth_broadcast_addr(frame->da);
 768	ether_addr_copy(frame->bssid, params->bssid);
 769	frame->seq_ctrl = 0;
 770
 771	pos = frame->u.probe_req.variable;
 772	*pos++ = WLAN_EID_SSID;
 773	*pos++ = 0;
 774
 775	params->preq.mac_header.offset = 0;
 776	params->preq.mac_header.len = cpu_to_le16(24 + 2);
 777
 778	/* Insert ds parameter set element on 2.4 GHz band */
 779	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
 780						 ies->ies[NL80211_BAND_2GHZ],
 781						 ies->len[NL80211_BAND_2GHZ],
 782						 pos);
 783	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
 784	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
 785	pos = newpos;
 786
 787	memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
 788	       ies->len[NL80211_BAND_5GHZ]);
 789	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
 790	params->preq.band_data[1].len =
 791		cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
 792	pos += ies->len[NL80211_BAND_5GHZ];
 793
 794	memcpy(pos, ies->ies[NL80211_BAND_6GHZ],
 795	       ies->len[NL80211_BAND_6GHZ]);
 796	params->preq.band_data[2].offset = cpu_to_le16(pos - params->preq.buf);
 797	params->preq.band_data[2].len =
 798		cpu_to_le16(ies->len[NL80211_BAND_6GHZ]);
 799	pos += ies->len[NL80211_BAND_6GHZ];
 800	memcpy(pos, ies->common_ies, ies->common_ie_len);
 801	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
 802
 803	if (iwl_mvm_rrm_scan_needed(mvm) &&
 804	    !fw_has_capa(&mvm->fw->ucode_capa,
 805			 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
 806		iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
 807		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
 808							   WFA_TPC_IE_LEN);
 809	} else {
 810		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
 811	}
 812}
 813
 814static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
 815				    struct iwl_scan_req_lmac *cmd,
 816				    struct iwl_mvm_scan_params *params)
 817{
 818	cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
 819	cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
 820	cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
 821	cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
 822	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
 823	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 824	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
 825}
 826
 827static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
 828				     struct ieee80211_scan_ies *ies,
 829				     int n_channels)
 830{
 831	return ((n_ssids <= PROBE_OPTION_MAX) &&
 832		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &
 833		(ies->common_ie_len +
 834		 ies->len[NL80211_BAND_2GHZ] +
 835		 ies->len[NL80211_BAND_5GHZ] <=
 836		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
 837}
 838
 839static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
 840					struct ieee80211_vif *vif)
 841{
 842	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
 843	bool low_latency;
 844
 845	if (iwl_mvm_is_cdb_supported(mvm))
 846		low_latency = iwl_mvm_low_latency_band(mvm, NL80211_BAND_5GHZ);
 847	else
 848		low_latency = iwl_mvm_low_latency(mvm);
 849
 850	/* We can only use EBS if:
 851	 *	1. the feature is supported;
 852	 *	2. the last EBS was successful;
 853	 *	3. if only single scan, the single scan EBS API is supported;
 854	 *	4. it's not a p2p find operation.
 855	 *	5. we are not in low latency mode,
 856	 *	   or if fragmented ebs is supported by the FW
 857	 */
 858	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
 859		mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
 860		vif->type != NL80211_IFTYPE_P2P_DEVICE &&
 861		(!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
 862}
 863
 864static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
 865{
 866	return params->n_scan_plans == 1 &&
 867		params->scan_plans[0].iterations == 1;
 868}
 869
 870static bool iwl_mvm_is_scan_fragmented(enum iwl_mvm_scan_type type)
 871{
 872	return (type == IWL_SCAN_TYPE_FRAGMENTED ||
 873		type == IWL_SCAN_TYPE_FAST_BALANCE);
 874}
 875
 876static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 877				   struct iwl_mvm_scan_params *params,
 878				   struct ieee80211_vif *vif)
 879{
 880	int flags = 0;
 881
 882	if (params->n_ssids == 0)
 883		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
 884
 885	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 886		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 887
 888	if (iwl_mvm_is_scan_fragmented(params->type))
 889		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 890
 891	if (iwl_mvm_rrm_scan_needed(mvm) &&
 892	    fw_has_capa(&mvm->fw->ucode_capa,
 893			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
 894		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
 895
 896	if (params->pass_all)
 897		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
 898	else
 899		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 900
 901#ifdef CONFIG_IWLWIFI_DEBUGFS
 902	if (mvm->scan_iter_notif_enabled)
 903		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
 904#endif
 905
 906	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
 907		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
 908
 909	if (iwl_mvm_is_regular_scan(params) &&
 910	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
 911	    !iwl_mvm_is_scan_fragmented(params->type))
 912		flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
 913
 914	return flags;
 915}
 916
 917static void
 918iwl_mvm_scan_set_legacy_probe_req(struct iwl_scan_probe_req_v1 *p_req,
 919				  struct iwl_scan_probe_req *src_p_req)
 920{
 921	int i;
 922
 923	p_req->mac_header = src_p_req->mac_header;
 924	for (i = 0; i < SCAN_NUM_BAND_PROBE_DATA_V_1; i++)
 925		p_req->band_data[i] = src_p_req->band_data[i];
 926	p_req->common_data = src_p_req->common_data;
 927	memcpy(p_req->buf, src_p_req->buf, sizeof(p_req->buf));
 928}
 929
 930static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 931			     struct iwl_mvm_scan_params *params)
 932{
 933	struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
 934	struct iwl_scan_probe_req_v1 *preq =
 935		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
 936			 mvm->fw->ucode_capa.n_scan_channels);
 937	u32 ssid_bitmap = 0;
 938	int i;
 939	u8 band;
 940
 941	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
 942		return -EINVAL;
 943
 944	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
 945
 946	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
 947	cmd->iter_num = cpu_to_le32(1);
 948	cmd->n_channels = (u8)params->n_channels;
 949
 950	cmd->delay = cpu_to_le32(params->delay);
 951
 952	cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
 953							      vif));
 954
 955	band = iwl_mvm_phy_band_from_nl80211(params->channels[0]->band);
 956	cmd->flags = cpu_to_le32(band);
 957	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
 958					MAC_FILTER_IN_BEACON);
 959	iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
 960	iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
 961
 962	/* this API uses bits 1-20 instead of 0-19 */
 963	ssid_bitmap <<= 1;
 964
 965	for (i = 0; i < params->n_scan_plans; i++) {
 966		struct cfg80211_sched_scan_plan *scan_plan =
 967			&params->scan_plans[i];
 968
 969		cmd->schedule[i].delay =
 970			cpu_to_le16(scan_plan->interval);
 971		cmd->schedule[i].iterations = scan_plan->iterations;
 972		cmd->schedule[i].full_scan_mul = 1;
 973	}
 974
 975	/*
 976	 * If the number of iterations of the last scan plan is set to
 977	 * zero, it should run infinitely. However, this is not always the case.
 978	 * For example, when regular scan is requested the driver sets one scan
 979	 * plan with one iteration.
 980	 */
 981	if (!cmd->schedule[i - 1].iterations)
 982		cmd->schedule[i - 1].iterations = 0xff;
 983
 984	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
 985		cmd->channel_opt[0].flags =
 986			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
 987				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
 988				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
 989		cmd->channel_opt[0].non_ebs_ratio =
 990			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
 991		cmd->channel_opt[1].flags =
 992			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
 993				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
 994				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
 995		cmd->channel_opt[1].non_ebs_ratio =
 996			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
 997	}
 998
 999	iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
1000				       params->n_channels, ssid_bitmap, cmd);
1001
1002	iwl_mvm_scan_set_legacy_probe_req(preq, &params->preq);
1003
1004	return 0;
1005}
1006
1007static int rate_to_scan_rate_flag(unsigned int rate)
1008{
1009	static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
1010		[IWL_RATE_1M_INDEX]	= SCAN_CONFIG_RATE_1M,
1011		[IWL_RATE_2M_INDEX]	= SCAN_CONFIG_RATE_2M,
1012		[IWL_RATE_5M_INDEX]	= SCAN_CONFIG_RATE_5M,
1013		[IWL_RATE_11M_INDEX]	= SCAN_CONFIG_RATE_11M,
1014		[IWL_RATE_6M_INDEX]	= SCAN_CONFIG_RATE_6M,
1015		[IWL_RATE_9M_INDEX]	= SCAN_CONFIG_RATE_9M,
1016		[IWL_RATE_12M_INDEX]	= SCAN_CONFIG_RATE_12M,
1017		[IWL_RATE_18M_INDEX]	= SCAN_CONFIG_RATE_18M,
1018		[IWL_RATE_24M_INDEX]	= SCAN_CONFIG_RATE_24M,
1019		[IWL_RATE_36M_INDEX]	= SCAN_CONFIG_RATE_36M,
1020		[IWL_RATE_48M_INDEX]	= SCAN_CONFIG_RATE_48M,
1021		[IWL_RATE_54M_INDEX]	= SCAN_CONFIG_RATE_54M,
1022	};
1023
1024	return rate_to_scan_rate[rate];
1025}
1026
1027static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
1028{
1029	struct ieee80211_supported_band *band;
1030	unsigned int rates = 0;
1031	int i;
1032
1033	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1034	for (i = 0; i < band->n_bitrates; i++)
1035		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1036	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1037	for (i = 0; i < band->n_bitrates; i++)
1038		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
1039
1040	/* Set both basic rates and supported rates */
1041	rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
1042
1043	return cpu_to_le32(rates);
1044}
1045
1046static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
1047				    struct iwl_scan_dwell *dwell)
1048{
1049	dwell->active = IWL_SCAN_DWELL_ACTIVE;
1050	dwell->passive = IWL_SCAN_DWELL_PASSIVE;
1051	dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
1052	dwell->extended = IWL_SCAN_DWELL_EXTENDED;
1053}
1054
1055static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels,
1056				  u32 max_channels)
1057{
1058	struct ieee80211_supported_band *band;
1059	int i, j = 0;
1060
1061	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1062	for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1063		channels[j] = band->channels[i].hw_value;
1064	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1065	for (i = 0; i < band->n_channels && j < max_channels; i++, j++)
1066		channels[j] = band->channels[i].hw_value;
1067}
1068
1069static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
1070					u32 flags, u8 channel_flags,
1071					u32 max_channels)
1072{
1073	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, NULL);
1074	struct iwl_scan_config_v1 *cfg = config;
1075
1076	cfg->flags = cpu_to_le32(flags);
1077	cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1078	cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1079	cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1080	cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
1081	cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
1082
1083	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1084
1085	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1086
1087	/* This function should not be called when using ADD_STA ver >=12 */
1088	WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
1089
1090	cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1091	cfg->channel_flags = channel_flags;
1092
1093	iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1094}
1095
1096static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
1097					u32 flags, u8 channel_flags,
1098					u32 max_channels)
1099{
1100	struct iwl_scan_config_v2 *cfg = config;
1101
1102	cfg->flags = cpu_to_le32(flags);
1103	cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1104	cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1105	cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
1106
1107	if (iwl_mvm_is_cdb_supported(mvm)) {
1108		enum iwl_mvm_scan_type lb_type, hb_type;
1109
1110		lb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1111						     NL80211_BAND_2GHZ);
1112		hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1113						     NL80211_BAND_5GHZ);
1114
1115		cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1116			cpu_to_le32(scan_timing[lb_type].max_out_time);
1117		cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1118			cpu_to_le32(scan_timing[lb_type].suspend_time);
1119
1120		cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
1121			cpu_to_le32(scan_timing[hb_type].max_out_time);
1122		cfg->suspend_time[SCAN_HB_LMAC_IDX] =
1123			cpu_to_le32(scan_timing[hb_type].suspend_time);
1124	} else {
1125		enum iwl_mvm_scan_type type =
1126			iwl_mvm_get_scan_type(mvm, NULL);
1127
1128		cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
1129			cpu_to_le32(scan_timing[type].max_out_time);
1130		cfg->suspend_time[SCAN_LB_LMAC_IDX] =
1131			cpu_to_le32(scan_timing[type].suspend_time);
1132	}
1133
1134	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
1135
1136	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
1137
1138	/* This function should not be called when using ADD_STA ver >=12 */
1139	WARN_ON_ONCE(iwl_mvm_has_new_station_api(mvm->fw));
1140
1141	cfg->bcast_sta_id = mvm->aux_sta.sta_id;
1142	cfg->channel_flags = channel_flags;
1143
1144	iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
1145}
1146
1147static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
1148{
1149	void *cfg;
1150	int ret, cmd_size;
1151	struct iwl_host_cmd cmd = {
1152		.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
1153	};
1154	enum iwl_mvm_scan_type type;
1155	enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
1156	int num_channels =
1157		mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
1158		mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
1159	u32 flags;
1160	u8 channel_flags;
1161
1162	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
1163		num_channels = mvm->fw->ucode_capa.n_scan_channels;
1164
1165	if (iwl_mvm_is_cdb_supported(mvm)) {
1166		type = iwl_mvm_get_scan_type_band(mvm, NULL,
1167						  NL80211_BAND_2GHZ);
1168		hb_type = iwl_mvm_get_scan_type_band(mvm, NULL,
1169						     NL80211_BAND_5GHZ);
1170		if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
1171			return 0;
1172	} else {
1173		type = iwl_mvm_get_scan_type(mvm, NULL);
1174		if (type == mvm->scan_type)
1175			return 0;
1176	}
1177
1178	if (iwl_mvm_cdb_scan_api(mvm))
1179		cmd_size = sizeof(struct iwl_scan_config_v2);
1180	else
1181		cmd_size = sizeof(struct iwl_scan_config_v1);
1182	cmd_size += mvm->fw->ucode_capa.n_scan_channels;
1183
1184	cfg = kzalloc(cmd_size, GFP_KERNEL);
1185	if (!cfg)
1186		return -ENOMEM;
1187
1188	flags = SCAN_CONFIG_FLAG_ACTIVATE |
1189		 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
1190		 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
1191		 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
1192		 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
1193		 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
1194		 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
1195		 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
1196		 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
1197		 SCAN_CONFIG_N_CHANNELS(num_channels) |
1198		 (iwl_mvm_is_scan_fragmented(type) ?
1199		  SCAN_CONFIG_FLAG_SET_FRAGMENTED :
1200		  SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
1201
1202	channel_flags = IWL_CHANNEL_FLAG_EBS |
1203			IWL_CHANNEL_FLAG_ACCURATE_EBS |
1204			IWL_CHANNEL_FLAG_EBS_ADD |
1205			IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
1206
1207	/*
1208	 * Check for fragmented scan on LMAC2 - high band.
1209	 * LMAC1 - low band is checked above.
1210	 */
1211	if (iwl_mvm_cdb_scan_api(mvm)) {
1212		if (iwl_mvm_is_cdb_supported(mvm))
1213			flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
1214				 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
1215				 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
1216		iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
1217					    num_channels);
1218	} else {
1219		iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
1220					    num_channels);
1221	}
1222
1223	cmd.data[0] = cfg;
1224	cmd.len[0] = cmd_size;
1225	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1226
1227	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1228
1229	ret = iwl_mvm_send_cmd(mvm, &cmd);
1230	if (!ret) {
1231		mvm->scan_type = type;
1232		mvm->hb_scan_type = hb_type;
1233	}
1234
1235	kfree(cfg);
1236	return ret;
1237}
1238
1239int iwl_mvm_config_scan(struct iwl_mvm *mvm)
1240{
1241	struct iwl_scan_config cfg;
1242	struct iwl_host_cmd cmd = {
1243		.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_CFG_CMD),
1244		.len[0] = sizeof(cfg),
1245		.data[0] = &cfg,
1246		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
1247	};
1248
1249	if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
1250		return iwl_mvm_legacy_config_scan(mvm);
1251
1252	memset(&cfg, 0, sizeof(cfg));
1253
1254	if (!iwl_mvm_has_new_station_api(mvm->fw)) {
1255		cfg.bcast_sta_id = mvm->aux_sta.sta_id;
1256	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_CFG_CMD, 0) < 5) {
1257		/*
1258		 * Fw doesn't use this sta anymore. Deprecated on SCAN_CFG_CMD
1259		 * version 5.
1260		 */
1261		cfg.bcast_sta_id = 0xff;
1262	}
1263
1264	cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
1265	cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
1266
1267	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1268
1269	return iwl_mvm_send_cmd(mvm, &cmd);
1270}
1271
1272static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
1273{
1274	int i;
1275
1276	for (i = 0; i < mvm->max_scans; i++)
1277		if (mvm->scan_uid_status[i] == status)
1278			return i;
1279
1280	return -ENOENT;
1281}
1282
1283static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1284				    struct iwl_scan_req_umac *cmd,
1285				    struct iwl_mvm_scan_params *params)
1286{
1287	struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1288	u8 active_dwell, passive_dwell;
1289
1290	timing = &scan_timing[params->type];
1291	active_dwell = IWL_SCAN_DWELL_ACTIVE;
1292	passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1293
1294	if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
1295		cmd->v7.adwell_default_n_aps_social =
1296			IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1297		cmd->v7.adwell_default_n_aps =
1298			IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1299
1300		if (iwl_mvm_is_adwell_hb_ap_num_supported(mvm))
1301			cmd->v9.adwell_default_hb_n_aps =
1302				IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1303
1304		/* if custom max budget was configured with debugfs */
1305		if (IWL_MVM_ADWELL_MAX_BUDGET)
1306			cmd->v7.adwell_max_budget =
1307				cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1308		else if (params->ssids && params->ssids[0].ssid_len)
1309			cmd->v7.adwell_max_budget =
1310				cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1311		else
1312			cmd->v7.adwell_max_budget =
1313				cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1314
1315		cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1316		cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
1317			cpu_to_le32(timing->max_out_time);
1318		cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
1319			cpu_to_le32(timing->suspend_time);
1320
1321		if (iwl_mvm_is_cdb_supported(mvm)) {
1322			hb_timing = &scan_timing[params->hb_type];
1323
1324			cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
1325				cpu_to_le32(hb_timing->max_out_time);
1326			cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
1327				cpu_to_le32(hb_timing->suspend_time);
1328		}
1329
1330		if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
1331			cmd->v7.active_dwell = active_dwell;
1332			cmd->v7.passive_dwell = passive_dwell;
1333			cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1334		} else {
1335			cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1336			cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1337			if (iwl_mvm_is_cdb_supported(mvm)) {
1338				cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] =
1339					active_dwell;
1340				cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] =
1341					passive_dwell;
1342			}
1343		}
1344	} else {
1345		cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
1346		cmd->v1.active_dwell = active_dwell;
1347		cmd->v1.passive_dwell = passive_dwell;
1348		cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
1349
1350		if (iwl_mvm_is_cdb_supported(mvm)) {
1351			hb_timing = &scan_timing[params->hb_type];
1352
1353			cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
1354					cpu_to_le32(hb_timing->max_out_time);
1355			cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
1356					cpu_to_le32(hb_timing->suspend_time);
1357		}
1358
1359		if (iwl_mvm_cdb_scan_api(mvm)) {
1360			cmd->v6.scan_priority =
1361				cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1362			cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
1363				cpu_to_le32(timing->max_out_time);
1364			cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
1365				cpu_to_le32(timing->suspend_time);
1366		} else {
1367			cmd->v1.scan_priority =
1368				cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1369			cmd->v1.max_out_time =
1370				cpu_to_le32(timing->max_out_time);
1371			cmd->v1.suspend_time =
1372				cpu_to_le32(timing->suspend_time);
1373		}
1374	}
1375
1376	if (iwl_mvm_is_regular_scan(params))
1377		cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1378	else
1379		cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
1380}
1381
1382static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
1383{
1384	return iwl_mvm_is_regular_scan(params) ?
1385		IWL_SCAN_PRIORITY_EXT_6 :
1386		IWL_SCAN_PRIORITY_EXT_2;
1387}
1388
1389static void
1390iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
1391			    struct iwl_scan_general_params_v11 *general_params,
1392			    struct iwl_mvm_scan_params *params)
1393{
1394	struct iwl_mvm_scan_timing_params *timing, *hb_timing;
1395	u8 active_dwell, passive_dwell;
1396
1397	timing = &scan_timing[params->type];
1398	active_dwell = IWL_SCAN_DWELL_ACTIVE;
1399	passive_dwell = IWL_SCAN_DWELL_PASSIVE;
1400
1401	general_params->adwell_default_social_chn =
1402		IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
1403	general_params->adwell_default_2g = IWL_SCAN_ADWELL_DEFAULT_LB_N_APS;
1404	general_params->adwell_default_5g = IWL_SCAN_ADWELL_DEFAULT_HB_N_APS;
1405
1406	/* if custom max budget was configured with debugfs */
1407	if (IWL_MVM_ADWELL_MAX_BUDGET)
1408		general_params->adwell_max_budget =
1409			cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
1410	else if (params->ssids && params->ssids[0].ssid_len)
1411		general_params->adwell_max_budget =
1412			cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
1413	else
1414		general_params->adwell_max_budget =
1415			cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
1416
1417	general_params->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1418	general_params->max_out_of_time[SCAN_LB_LMAC_IDX] =
1419		cpu_to_le32(timing->max_out_time);
1420	general_params->suspend_time[SCAN_LB_LMAC_IDX] =
1421		cpu_to_le32(timing->suspend_time);
1422
1423	hb_timing = &scan_timing[params->hb_type];
1424
1425	general_params->max_out_of_time[SCAN_HB_LMAC_IDX] =
1426		cpu_to_le32(hb_timing->max_out_time);
1427	general_params->suspend_time[SCAN_HB_LMAC_IDX] =
1428		cpu_to_le32(hb_timing->suspend_time);
1429
1430	general_params->active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
1431	general_params->passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
1432	general_params->active_dwell[SCAN_HB_LMAC_IDX] = active_dwell;
1433	general_params->passive_dwell[SCAN_HB_LMAC_IDX] = passive_dwell;
1434}
1435
1436struct iwl_mvm_scan_channel_segment {
1437	u8 start_idx;
1438	u8 end_idx;
1439	u8 first_channel_id;
1440	u8 last_channel_id;
1441	u8 channel_spacing_shift;
1442	u8 band;
1443};
1444
1445static const struct iwl_mvm_scan_channel_segment scan_channel_segments[] = {
1446	{
1447		.start_idx = 0,
1448		.end_idx = 13,
1449		.first_channel_id = 1,
1450		.last_channel_id = 14,
1451		.channel_spacing_shift = 0,
1452		.band = PHY_BAND_24
1453	},
1454	{
1455		.start_idx = 14,
1456		.end_idx = 41,
1457		.first_channel_id = 36,
1458		.last_channel_id = 144,
1459		.channel_spacing_shift = 2,
1460		.band = PHY_BAND_5
1461	},
1462	{
1463		.start_idx = 42,
1464		.end_idx = 50,
1465		.first_channel_id = 149,
1466		.last_channel_id = 181,
1467		.channel_spacing_shift = 2,
1468		.band = PHY_BAND_5
1469	},
1470	{
1471		.start_idx = 51,
1472		.end_idx = 111,
1473		.first_channel_id = 1,
1474		.last_channel_id = 241,
1475		.channel_spacing_shift = 2,
1476		.band = PHY_BAND_6
1477	},
1478};
1479
1480static int iwl_mvm_scan_ch_and_band_to_idx(u8 channel_id, u8 band)
1481{
1482	int i, index;
1483
1484	if (!channel_id)
1485		return -EINVAL;
1486
1487	for (i = 0; i < ARRAY_SIZE(scan_channel_segments); i++) {
1488		const struct iwl_mvm_scan_channel_segment *ch_segment =
1489			&scan_channel_segments[i];
1490		u32 ch_offset;
1491
1492		if (ch_segment->band != band ||
1493		    ch_segment->first_channel_id > channel_id ||
1494		    ch_segment->last_channel_id < channel_id)
1495			continue;
1496
1497		ch_offset = (channel_id - ch_segment->first_channel_id) >>
1498			ch_segment->channel_spacing_shift;
1499
1500		index = scan_channel_segments[i].start_idx + ch_offset;
1501		if (index < IWL_SCAN_NUM_CHANNELS)
1502			return index;
1503
1504		break;
1505	}
1506
1507	return -EINVAL;
1508}
1509
1510static const u8 p2p_go_friendly_chs[] = {
1511	36, 40, 44, 48, 149, 153, 157, 161, 165,
1512};
1513
1514static const u8 social_chs[] = {
1515	1, 6, 11
1516};
1517
1518static void iwl_mvm_scan_ch_add_n_aps_override(enum nl80211_iftype vif_type,
1519					       u8 ch_id, u8 band, u8 *ch_bitmap,
1520					       size_t bitmap_n_entries)
1521{
1522	int i;
1523
1524	if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1525		return;
1526
1527	for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1528		if (p2p_go_friendly_chs[i] == ch_id) {
1529			int ch_idx, bitmap_idx;
1530
1531			ch_idx = iwl_mvm_scan_ch_and_band_to_idx(ch_id, band);
1532			if (ch_idx < 0)
1533				return;
1534
1535			bitmap_idx = ch_idx / 8;
1536			if (bitmap_idx >= bitmap_n_entries)
1537				return;
1538
1539			ch_idx = ch_idx % 8;
1540			ch_bitmap[bitmap_idx] |= BIT(ch_idx);
1541
1542			return;
1543		}
1544	}
1545}
1546
1547static u32 iwl_mvm_scan_ch_n_aps_flag(enum nl80211_iftype vif_type, u8 ch_id)
1548{
1549	int i;
1550	u32 flags = 0;
1551
1552	if (vif_type != NL80211_IFTYPE_P2P_DEVICE)
1553		goto out;
1554
1555	for (i = 0; i < ARRAY_SIZE(p2p_go_friendly_chs); i++) {
1556		if (p2p_go_friendly_chs[i] == ch_id) {
1557			flags |= IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY_BIT;
1558			break;
1559		}
1560	}
1561
1562	if (flags)
1563		goto out;
1564
1565	for (i = 0; i < ARRAY_SIZE(social_chs); i++) {
1566		if (social_chs[i] == ch_id) {
1567			flags |= IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS_BIT;
1568			break;
1569		}
1570	}
1571
1572out:
1573	return flags;
1574}
1575
1576static void
1577iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1578			       struct ieee80211_channel **channels,
1579			       int n_channels, u32 flags,
1580			       struct iwl_scan_channel_cfg_umac *channel_cfg)
1581{
1582	int i;
1583
1584	for (i = 0; i < n_channels; i++) {
1585		channel_cfg[i].flags = cpu_to_le32(flags);
1586		channel_cfg[i].v1.channel_num = channels[i]->hw_value;
1587		if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
1588			enum nl80211_band band = channels[i]->band;
1589
1590			channel_cfg[i].v2.band =
1591				iwl_mvm_phy_band_from_nl80211(band);
1592			channel_cfg[i].v2.iter_count = 1;
1593			channel_cfg[i].v2.iter_interval = 0;
1594		} else {
1595			channel_cfg[i].v1.iter_count = 1;
1596			channel_cfg[i].v1.iter_interval = 0;
1597		}
1598	}
1599}
1600
1601static void
1602iwl_mvm_umac_scan_cfg_channels_v4(struct iwl_mvm *mvm,
1603				  struct ieee80211_channel **channels,
1604				  struct iwl_scan_channel_params_v4 *cp,
1605				  int n_channels, u32 flags,
1606				  enum nl80211_iftype vif_type)
1607{
1608	u8 *bitmap = cp->adwell_ch_override_bitmap;
1609	size_t bitmap_n_entries = ARRAY_SIZE(cp->adwell_ch_override_bitmap);
1610	int i;
1611
1612	for (i = 0; i < n_channels; i++) {
1613		enum nl80211_band band = channels[i]->band;
1614		struct iwl_scan_channel_cfg_umac *cfg =
1615			&cp->channel_config[i];
1616
1617		cfg->flags = cpu_to_le32(flags);
1618		cfg->v2.channel_num = channels[i]->hw_value;
1619		cfg->v2.band = iwl_mvm_phy_band_from_nl80211(band);
1620		cfg->v2.iter_count = 1;
1621		cfg->v2.iter_interval = 0;
1622
1623		iwl_mvm_scan_ch_add_n_aps_override(vif_type,
1624						   cfg->v2.channel_num,
1625						   cfg->v2.band, bitmap,
1626						   bitmap_n_entries);
1627	}
1628}
1629
1630static void
1631iwl_mvm_umac_scan_cfg_channels_v7(struct iwl_mvm *mvm,
1632				  struct ieee80211_channel **channels,
1633				  struct iwl_scan_channel_params_v7 *cp,
1634				  int n_channels, u32 flags,
1635				  enum nl80211_iftype vif_type, u32 version)
1636{
1637	int i;
1638
1639	for (i = 0; i < n_channels; i++) {
1640		enum nl80211_band band = channels[i]->band;
1641		struct iwl_scan_channel_cfg_umac *cfg = &cp->channel_config[i];
1642		u32 n_aps_flag =
1643			iwl_mvm_scan_ch_n_aps_flag(vif_type,
1644						   channels[i]->hw_value);
1645		u8 iwl_band = iwl_mvm_phy_band_from_nl80211(band);
1646
1647		cfg->flags = cpu_to_le32(flags | n_aps_flag);
1648		cfg->v2.channel_num = channels[i]->hw_value;
1649		if (cfg80211_channel_is_psc(channels[i]))
1650			cfg->flags = 0;
1651		cfg->v2.iter_count = 1;
1652		cfg->v2.iter_interval = 0;
1653		if (version < 17)
1654			cfg->v2.band = iwl_band;
1655		else
1656			cfg->flags |= cpu_to_le32((iwl_band <<
1657						   IWL_CHAN_CFG_FLAGS_BAND_POS));
1658	}
1659}
1660
1661static void
1662iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
1663				    struct iwl_mvm_scan_params *params,
1664				     struct iwl_scan_probe_params_v4 *pp)
1665{
1666	int j, idex_s = 0, idex_b = 0;
1667	struct cfg80211_scan_6ghz_params *scan_6ghz_params =
1668		params->scan_6ghz_params;
1669	bool hidden_supported = fw_has_capa(&mvm->fw->ucode_capa,
1670					    IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN);
1671
1672	for (j = 0; j < params->n_ssids && idex_s < SCAN_SHORT_SSID_MAX_SIZE;
1673	     j++) {
1674		if (!params->ssids[j].ssid_len)
1675			continue;
1676
1677		pp->short_ssid[idex_s] =
1678			cpu_to_le32(~crc32_le(~0, params->ssids[j].ssid,
1679					      params->ssids[j].ssid_len));
1680
1681		if (hidden_supported) {
1682			pp->direct_scan[idex_s].id = WLAN_EID_SSID;
1683			pp->direct_scan[idex_s].len = params->ssids[j].ssid_len;
1684			memcpy(pp->direct_scan[idex_s].ssid, params->ssids[j].ssid,
1685			       params->ssids[j].ssid_len);
1686		}
1687		idex_s++;
1688	}
1689
1690	/*
1691	 * Populate the arrays of the short SSIDs and the BSSIDs using the 6GHz
1692	 * collocated parameters. This might not be optimal, as this processing
1693	 * does not (yet) correspond to the actual channels, so it is possible
1694	 * that some entries would be left out.
1695	 *
1696	 * TODO: improve this logic.
1697	 */
1698	for (j = 0; j < params->n_6ghz_params; j++) {
1699		int k;
1700
1701		/* First, try to place the short SSID */
1702		if (scan_6ghz_params[j].short_ssid_valid) {
1703			for (k = 0; k < idex_s; k++) {
1704				if (pp->short_ssid[k] ==
1705				    cpu_to_le32(scan_6ghz_params[j].short_ssid))
1706					break;
1707			}
1708
1709			if (k == idex_s && idex_s < SCAN_SHORT_SSID_MAX_SIZE) {
1710				pp->short_ssid[idex_s++] =
1711					cpu_to_le32(scan_6ghz_params[j].short_ssid);
1712			}
1713		}
1714
1715		/* try to place BSSID for the same entry */
1716		for (k = 0; k < idex_b; k++) {
1717			if (!memcmp(&pp->bssid_array[k],
1718				    scan_6ghz_params[j].bssid, ETH_ALEN))
1719				break;
1720		}
1721
1722		if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
1723			memcpy(&pp->bssid_array[idex_b++],
1724			       scan_6ghz_params[j].bssid, ETH_ALEN);
1725		}
1726	}
1727
1728	pp->short_ssid_num = idex_s;
1729	pp->bssid_num = idex_b;
1730}
1731
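/*
 * Per-channel 6GHz configuration: map each channel to the short-SSID and
 * BSSID pool entries that reference it, budgeting roughly three direct
 * BSSID probes per short-SSID (broadcast) probe, and force passive scan
 * when the per-channel limits described below are exceeded.
 */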
1732/* TODO: this function can be merged with iwl_mvm_scan_umac_fill_ch_p_v7 */
1733static u32
1734iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
1735				     struct iwl_mvm_scan_params *params,
1736				     u32 n_channels,
1737				     struct iwl_scan_probe_params_v4 *pp,
1738				     struct iwl_scan_channel_params_v7 *cp,
1739				     enum nl80211_iftype vif_type,
1740				     u32 version)
1741{
1742	int i;
1743	struct cfg80211_scan_6ghz_params *scan_6ghz_params =
1744		params->scan_6ghz_params;
1745	u32 ch_cnt;
1746
1747	for (i = 0, ch_cnt = 0; i < params->n_channels; i++) {
1748		struct iwl_scan_channel_cfg_umac *cfg =
1749			&cp->channel_config[ch_cnt];
1750
1751		u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
1752		u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
1753		bool force_passive, found = false, allow_passive = true,
1754		     unsolicited_probe_on_chan = false, psc_no_listen = false;
1755		s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
1756
1757		/*
1758		 * Avoid performing passive scan on non PSC channels unless the
1759		 * scan is specifically a passive scan, i.e., no SSIDs
1760		 * configured in the scan command.
1761		 */
1762		if (!cfg80211_channel_is_psc(params->channels[i]) &&
1763		    !params->n_6ghz_params && params->n_ssids)
1764			continue;
1765
1766		cfg->v1.channel_num = params->channels[i]->hw_value;
1767		if (version < 17)
1768			cfg->v2.band = PHY_BAND_6;
1769		else
1770			cfg->flags |= cpu_to_le32(PHY_BAND_6 <<
1771						  IWL_CHAN_CFG_FLAGS_BAND_POS);
1772
1773		cfg->v5.iter_count = 1;
1774		cfg->v5.iter_interval = 0;
1775
1776		/*
1777		 * To optimize the scan time, i.e., reduce the scan dwell time
1778		 * on each channel, the below logic tries to set 3 direct BSSID
1779		 * probe requests for each broadcast probe request with a short
1780		 * SSID.
1781		 * TODO: improve this logic
1782		 */
1783		n_used_bssid_entries = 3;
1784		for (j = 0; j < params->n_6ghz_params; j++) {
1785			s8 tmp_psd_20;
1786
1787			if (scan_6ghz_params[j].channel_idx != i)
1788				continue;
1789
1790			/* Use the highest PSD value allowed as advertised by
1791			 * APs for this channel
1792			 */
1793			tmp_psd_20 = scan_6ghz_params[j].psd_20;
1794			if (tmp_psd_20 !=
1795			    IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED &&
1796			    (psd_20 ==
1797			     IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED ||
1798			     psd_20 < tmp_psd_20))
1799				psd_20 = tmp_psd_20;
1800
1801			found = false;
1802			unsolicited_probe_on_chan |=
1803				scan_6ghz_params[j].unsolicited_probe;
1804			psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
1805
1806			for (k = 0; k < pp->short_ssid_num; k++) {
1807				if (!scan_6ghz_params[j].unsolicited_probe &&
1808				    le32_to_cpu(pp->short_ssid[k]) ==
1809				    scan_6ghz_params[j].short_ssid) {
1810					/* Relevant short SSID bit set */
1811					if (s_ssid_bitmap & BIT(k)) {
1812						found = true;
1813						break;
1814					}
1815
1816					/*
1817					 * Use short SSID only to create a new
1818					 * iteration during channel dwell or in
1819					 * case that the short SSID has a
1820					 * matching SSID, i.e., scan for hidden
1821					 * APs.
1822					 */
1823					if (n_used_bssid_entries >= 3) {
1824						s_ssid_bitmap |= BIT(k);
1825						s_max++;
1826						n_used_bssid_entries -= 3;
1827						found = true;
1828						break;
1829					} else if (pp->direct_scan[k].len) {
1830						s_ssid_bitmap |= BIT(k);
1831						s_max++;
1832						found = true;
1833						allow_passive = false;
1834						break;
1835					}
1836				}
1837			}
1838
1839			if (found)
1840				continue;
1841
1842			for (k = 0; k < pp->bssid_num; k++) {
1843				if (!memcmp(&pp->bssid_array[k],
1844					    scan_6ghz_params[j].bssid,
1845					    ETH_ALEN)) {
1846					if (!(bssid_bitmap & BIT(k))) {
1847						bssid_bitmap |= BIT(k);
1848						b_max++;
1849						n_used_bssid_entries++;
1850					}
1851					break;
1852				}
1853			}
1854		}
1855
1856		if (cfg80211_channel_is_psc(params->channels[i]) &&
1857		    psc_no_listen)
1858			flags |= IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN;
1859
1860		if (unsolicited_probe_on_chan)
1861			flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
1862
1863		/*
1864		 * In the following cases apply passive scan:
1865		 * 1. Non fragmented scan:
1866		 *	- PSC channel with NO_LISTEN_FLAG on should be treated
1867		 *	  like non PSC channel
1868		 *	- Non PSC channel with more than 3 short SSIDs or more
1869		 *	  than 9 BSSIDs.
1870		 *	- Non PSC Channel with unsolicited probe response and
1871		 *	  more than 2 short SSIDs or more than 6 BSSIDs.
1872		 *	- PSC channel with more than 2 short SSIDs or more than
1873		 *	  6 BSSIDs.
1874		 * 2. Fragmented scan:
1875		 *	- PSC channel with more than 1 SSID or 3 BSSIDs.
1876		 *	- Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
1877		 *	- Non PSC channel with unsolicited probe response and
1878		 *	  more than 1 SSID or more than 3 BSSIDs.
1879		 */
1880		if (!iwl_mvm_is_scan_fragmented(params->type)) {
1881			if (!cfg80211_channel_is_psc(params->channels[i]) ||
1882			    flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
1883				force_passive = (s_max > 3 || b_max > 9);
1884				force_passive |= (unsolicited_probe_on_chan &&
1885						  (s_max > 2 || b_max > 6));
1886			} else {
1887				force_passive = (s_max > 2 || b_max > 6);
1888			}
1889		} else if (cfg80211_channel_is_psc(params->channels[i])) {
1890			force_passive = (s_max > 1 || b_max > 3);
1891		} else {
1892			force_passive = (s_max > 2 || b_max > 6);
1893			force_passive |= (unsolicited_probe_on_chan &&
1894					  (s_max > 1 || b_max > 3));
1895		}
1896		if ((allow_passive && force_passive) ||
1897		    (!(bssid_bitmap | s_ssid_bitmap) &&
1898		     !cfg80211_channel_is_psc(params->channels[i])))
1899			flags |= IWL_UHB_CHAN_CFG_FLAG_FORCE_PASSIVE;
1900		else
1901			flags |= bssid_bitmap | (s_ssid_bitmap << 16);
1902
1903		cfg->flags |= cpu_to_le32(flags);
1904		if (version >= 17)
1905			cfg->v5.psd_20 = psd_20;
1906
1907		ch_cnt++;
1908	}
1909
1910	if (params->n_channels > ch_cnt)
1911		IWL_DEBUG_SCAN(mvm,
1912			       "6GHz: reducing the number of channels: (%u->%u)\n",
1913			       params->n_channels, ch_cnt);
1914
1915	return ch_cnt;
1916}
1917
1918static u8 iwl_mvm_scan_umac_chan_flags_v2(struct iwl_mvm *mvm,
1919					  struct iwl_mvm_scan_params *params,
1920					  struct ieee80211_vif *vif)
1921{
1922	u8 flags = 0;
1923
1924	flags |= IWL_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
1925
1926	if (iwl_mvm_scan_use_ebs(mvm, vif))
1927		flags |= IWL_SCAN_CHANNEL_FLAG_EBS |
1928			IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1929			IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1930
1931	/* set fragmented ebs for fragmented scan on HB channels */
1932	if ((!iwl_mvm_is_cdb_supported(mvm) &&
1933	     iwl_mvm_is_scan_fragmented(params->type)) ||
1934	    (iwl_mvm_is_cdb_supported(mvm) &&
1935	     iwl_mvm_is_scan_fragmented(params->hb_type)))
1936		flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
1937
1938	/*
1939	 * Force EBS in case the scan is fragmented and there is a need to take P2P
1940	 * GO operation into consideration during the scan.
1941	 */
1942	if ((!iwl_mvm_is_cdb_supported(mvm) &&
1943	     iwl_mvm_is_scan_fragmented(params->type) && params->respect_p2p_go) ||
1944	    (iwl_mvm_is_cdb_supported(mvm) &&
1945	     iwl_mvm_is_scan_fragmented(params->hb_type) &&
1946	     params->respect_p2p_go_hb)) {
1947		IWL_DEBUG_SCAN(mvm, "Respect P2P GO. Force EBS\n");
1948		flags |= IWL_SCAN_CHANNEL_FLAG_FORCE_EBS;
1949	}
1950
1951	return flags;
1952}
1953
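/*
 * Decide whether a 6GHz passive scan should be added to this request.
 * It requires firmware support, a station interface, a wildcard scan
 * with enough channels, all 6GHz channels still disabled, and an allowed
 * time window (shortly after HW reset/resume, or long enough since the
 * last 6GHz passive scan while not associated).
 */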
1954static void iwl_mvm_scan_6ghz_passive_scan(struct iwl_mvm *mvm,
1955					   struct iwl_mvm_scan_params *params,
1956					   struct ieee80211_vif *vif)
1957{
1958	struct ieee80211_supported_band *sband =
1959		&mvm->nvm_data->bands[NL80211_BAND_6GHZ];
1960	u32 n_disabled, i;
1961
1962	params->enable_6ghz_passive = false;
1963
1964	if (params->scan_6ghz)
1965		return;
1966
1967	if (!fw_has_capa(&mvm->fw->ucode_capa,
1968			 IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN)) {
1969		IWL_DEBUG_SCAN(mvm,
1970			       "6GHz passive scan: Not supported by FW\n");
1971		return;
1972	}
1973
1974	/* 6GHz passive scan allowed only on station interface  */
1975	if (vif->type != NL80211_IFTYPE_STATION) {
1976		IWL_DEBUG_SCAN(mvm,
1977			       "6GHz passive scan: not station interface\n");
1978		return;
1979	}
1980
1981	/*
1982	 * 6GHz passive scan is allowed in a defined time interval following HW
1983	 * reset or resume flow, or while not associated and a large interval
1984	 * has passed since the last 6GHz passive scan.
1985	 */
1986	if ((vif->cfg.assoc ||
1987	     time_after(mvm->last_6ghz_passive_scan_jiffies +
1988			(IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT * HZ), jiffies)) &&
1989	    (time_before(mvm->last_reset_or_resume_time_jiffies +
1990			 (IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT * HZ),
1991			 jiffies))) {
1992		IWL_DEBUG_SCAN(mvm, "6GHz passive scan: %s\n",
1993			       vif->cfg.assoc ? "associated" :
1994			       "timeout did not expire");
1995		return;
1996	}
1997
1998	/* not enough channels in the regular scan request */
1999	if (params->n_channels < IWL_MVM_6GHZ_PASSIVE_SCAN_MIN_CHANS) {
2000		IWL_DEBUG_SCAN(mvm,
2001			       "6GHz passive scan: not enough channels\n");
2002		return;
2003	}
2004
2005	for (i = 0; i < params->n_ssids; i++) {
2006		if (!params->ssids[i].ssid_len)
2007			break;
2008	}
2009
2010	/* not a wildcard scan, so cannot enable passive 6GHz scan */
2011	if (i == params->n_ssids) {
2012		IWL_DEBUG_SCAN(mvm,
2013			       "6GHz passive scan: no wildcard SSID\n");
2014		return;
2015	}
2016
2017	if (!sband || !sband->n_channels) {
2018		IWL_DEBUG_SCAN(mvm,
2019			       "6GHz passive scan: no 6GHz channels\n");
2020		return;
2021	}
2022
2023	for (i = 0, n_disabled = 0; i < sband->n_channels; i++) {
2024		if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED))
2025			n_disabled++;
2026	}
2027
2028	/*
2029	 * Not all the 6GHz channels are disabled, so no need for 6GHz passive
2030	 * scan
2031	 */
2032	if (n_disabled != sband->n_channels) {
2033		IWL_DEBUG_SCAN(mvm,
2034			       "6GHz passive scan: 6GHz channels enabled\n");
2035		return;
2036	}
2037
2038	/* all conditions to enable 6ghz passive scan are satisfied */
2039	IWL_DEBUG_SCAN(mvm, "6GHz passive scan: can be enabled\n");
2040	params->enable_6ghz_passive = true;
2041}
2042
2043static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
2044				      struct iwl_mvm_scan_params *params,
2045				      struct ieee80211_vif *vif,
2046				      int type)
2047{
2048	u16 flags = 0;
2049
2050	/*
2051	 * If no direct SSIDs are provided perform a passive scan. Otherwise,
2052	 * if there is a single SSID which is not the broadcast SSID, assume
2053	 * that the scan is intended for roaming purposes and thus enable Rx on
2054	 * all chains to improve chances of hearing the beacons/probe responses.
2055	 */
2056	if (params->n_ssids == 0)
2057		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
2058	else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
2059		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
2060
2061	if (iwl_mvm_is_scan_fragmented(params->type))
2062		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
2063
2064	if (iwl_mvm_is_scan_fragmented(params->hb_type))
2065		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2;
2066
2067	if (params->pass_all)
2068		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
2069	else
2070		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_MATCH;
2071
2072	if (!iwl_mvm_is_regular_scan(params))
2073		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
2074
2075	if (params->iter_notif ||
2076	    mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
2077		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
2078
2079	if (IWL_MVM_ADWELL_ENABLE)
2080		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
2081
2082	if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
2083		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE;
2084
2085	if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
2086	    params->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ)
2087		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN;
2088
2089	if (params->enable_6ghz_passive)
2090		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_6GHZ_PASSIVE_SCAN;
2091
2092	if (iwl_mvm_is_oce_supported(mvm) &&
2093	    (params->flags & (NL80211_SCAN_FLAG_ACCEPT_BCAST_PROBE_RESP |
2094			      NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE |
2095			      NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME)))
2096		flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_OCE;
2097
2098	return flags;
2099}
2100
2101static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
2102				   struct iwl_mvm_scan_params *params,
2103				   struct ieee80211_vif *vif, int type)
2104{
2105	u8 flags = 0;
2106
2107	if (iwl_mvm_is_cdb_supported(mvm)) {
2108		if (params->respect_p2p_go)
2109			flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB;
2110		if (params->respect_p2p_go_hb)
2111			flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
2112	} else {
2113		if (params->respect_p2p_go)
2114			flags = IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_LB |
2115				IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_RESPECT_P2P_GO_HB;
2116	}
2117
2118	if (params->scan_6ghz &&
2119	    fw_has_capa(&mvm->fw->ucode_capa,
2120			IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT))
2121		flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
2122
2123	return flags;
2124}
2125
2126static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
2127				   struct iwl_mvm_scan_params *params,
2128				   struct ieee80211_vif *vif)
2129{
2130	u16 flags = 0;
2131
2132	if (params->n_ssids == 0)
2133		flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
2134
2135	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
2136		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
2137
2138	if (iwl_mvm_is_scan_fragmented(params->type))
2139		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
2140
2141	if (iwl_mvm_is_cdb_supported(mvm) &&
2142	    iwl_mvm_is_scan_fragmented(params->hb_type))
2143		flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
2144
2145	if (iwl_mvm_rrm_scan_needed(mvm) &&
2146	    fw_has_capa(&mvm->fw->ucode_capa,
2147			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
2148		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
2149
2150	if (params->pass_all)
2151		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
2152	else
2153		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
2154
2155	if (!iwl_mvm_is_regular_scan(params))
2156		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
2157
2158	if (params->iter_notif)
2159		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2160
2161#ifdef CONFIG_IWLWIFI_DEBUGFS
2162	if (mvm->scan_iter_notif_enabled)
2163		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2164#endif
2165
2166	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
2167		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
2168
2169	if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE)
2170		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
2171
2172	/*
2173	 * Extended dwell is relevant only for low band to start with, as it is
2174	 * being used for social channels only (1, 6, 11), so we can check
2175	 * only the scan type on the low band, also for CDB.
2176	 */
2177	if (iwl_mvm_is_regular_scan(params) &&
2178	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
2179	    !iwl_mvm_is_scan_fragmented(params->type) &&
2180	    !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
2181	    !iwl_mvm_is_oce_supported(mvm))
2182		flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
2183
2184	if (iwl_mvm_is_oce_supported(mvm)) {
2185		if ((params->flags &
2186		     NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE))
2187			flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;
2188		/* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and
2189		 * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share
2190		 * the same bit, we need to make sure that we use this bit here
2191		 * only when IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be
2192		 * used. */
2193		if ((params->flags &
2194		     NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
2195		     !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm)))
2196			flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;
2197		if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
2198			flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;
2199	}
2200
2201	return flags;
2202}
2203
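/*
 * Copy the cfg80211 scan plans into the UMAC schedule entries; a last
 * plan with zero iterations is replaced with 0xff to approximate an
 * infinite schedule.
 */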
2204static int
2205iwl_mvm_fill_scan_sched_params(struct iwl_mvm_scan_params *params,
2206			       struct iwl_scan_umac_schedule *schedule,
2207			       __le16 *delay)
2208{
2209	int i;
2210	if (WARN_ON(!params->n_scan_plans ||
2211		    params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
2212		return -EINVAL;
2213
2214	for (i = 0; i < params->n_scan_plans; i++) {
2215		struct cfg80211_sched_scan_plan *scan_plan =
2216			&params->scan_plans[i];
2217
2218		schedule[i].iter_count = scan_plan->iterations;
2219		schedule[i].interval =
2220			cpu_to_le16(scan_plan->interval);
2221	}
2222
2223	/*
2224	 * If the number of iterations of the last scan plan is set to
2225	 * zero, it should run infinitely. However, this is not always the case.
2226	 * For example, when regular scan is requested the driver sets one scan
2227	 * plan with one iteration.
2228	 */
2229	if (!schedule[params->n_scan_plans - 1].iter_count)
2230		schedule[params->n_scan_plans - 1].iter_count = 0xff;
2231
2232	*delay = cpu_to_le16(params->delay);
2233
2234	return 0;
2235}
2236
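/*
 * Fallback UMAC scan request builder, used when no versioned handler
 * matches the firmware's SCAN_REQ_UMAC version: the channel config array
 * is followed by a version-dependent tail holding the schedule, delay,
 * probe request template and direct-scan SSIDs.
 */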
2237static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2238			     struct iwl_mvm_scan_params *params,
2239			     int type, int uid)
2240{
2241	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
2242	struct iwl_scan_umac_chan_param *chan_param;
2243	void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
2244	void *sec_part = (u8 *)cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) *
2245		mvm->fw->ucode_capa.n_scan_channels;
2246	struct iwl_scan_req_umac_tail_v2 *tail_v2 =
2247		(struct iwl_scan_req_umac_tail_v2 *)sec_part;
2248	struct iwl_scan_req_umac_tail_v1 *tail_v1;
2249	struct iwl_ssid_ie *direct_scan;
2250	int ret = 0;
2251	u32 ssid_bitmap = 0;
2252	u8 channel_flags = 0;
2253	u16 gen_flags;
2254	struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
2255
2256	chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
2257
2258	iwl_mvm_scan_umac_dwell(mvm, cmd, params);
2259
2260	mvm->scan_uid_status[uid] = type;
2261
2262	cmd->uid = cpu_to_le32(uid);
2263	gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
2264	cmd->general_flags = cpu_to_le16(gen_flags);
2265	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
2266		if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)
2267			cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] =
2268							IWL_SCAN_NUM_OF_FRAGS;
2269		if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
2270			cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
2271							IWL_SCAN_NUM_OF_FRAGS;
2272
2273		cmd->v8.general_flags2 =
2274			IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER;
2275	}
2276
2277	cmd->scan_start_mac_id = scan_vif->id;
2278
2279	if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
2280		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
2281
2282	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
2283		channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
2284				IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
2285				IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
2286
2287		/* set fragmented ebs for fragmented scan on HB channels */
2288		if (iwl_mvm_is_frag_ebs_supported(mvm)) {
2289			if (gen_flags &
2290			    IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED ||
2291			    (!iwl_mvm_is_cdb_supported(mvm) &&
2292			     gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED))
2293				channel_flags |= IWL_SCAN_CHANNEL_FLAG_EBS_FRAG;
2294		}
2295	}
2296
2297	chan_param->flags = channel_flags;
2298	chan_param->count = params->n_channels;
2299
2300	ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
2301					     &tail_v2->delay);
2302	if (ret) {
2303		mvm->scan_uid_status[uid] = 0;
2304		return ret;
2305	}
2306
2307	if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
2308		tail_v2->preq = params->preq;
2309		direct_scan = tail_v2->direct_scan;
2310	} else {
2311		tail_v1 = (struct iwl_scan_req_umac_tail_v1 *)sec_part;
2312		iwl_mvm_scan_set_legacy_probe_req(&tail_v1->preq,
2313						  &params->preq);
2314		direct_scan = tail_v1->direct_scan;
2315	}
2316	iwl_scan_build_ssids(params, direct_scan, &ssid_bitmap);
2317	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
2318				       params->n_channels, ssid_bitmap,
2319				       cmd_data);
2320	return 0;
2321}
2322
2323static void
2324iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
2325				     struct iwl_mvm_scan_params *params,
2326				     struct ieee80211_vif *vif,
2327				     struct iwl_scan_general_params_v11 *gp,
2328				     u16 gen_flags, u8 gen_flags2,
2329				     u32 version)
2330{
2331	struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
2332
2333	iwl_mvm_scan_umac_dwell_v11(mvm, gp, params);
2334
2335	IWL_DEBUG_SCAN(mvm, "General: flags=0x%x, flags2=0x%x\n",
2336		       gen_flags, gen_flags2);
2337
2338	gp->flags = cpu_to_le16(gen_flags);
2339	gp->flags2 = gen_flags2;
2340
2341	if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
2342		gp->num_of_fragments[SCAN_LB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
2343	if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
2344		gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
2345
2346	mvm->scan_link_id = 0;
2347
2348	if (version < 16) {
2349		gp->scan_start_mac_or_link_id = scan_vif->id;
2350	} else {
2351		struct iwl_mvm_vif_link_info *link_info =
2352			scan_vif->link[params->tsf_report_link_id];
2353
2354		mvm->scan_link_id = params->tsf_report_link_id;
2355		if (!WARN_ON(!link_info))
2356			gp->scan_start_mac_or_link_id = link_info->fw_link_id;
2357	}
2358}
2359
2360static void
2361iwl_mvm_scan_umac_fill_probe_p_v3(struct iwl_mvm_scan_params *params,
2362				  struct iwl_scan_probe_params_v3 *pp)
2363{
2364	pp->preq = params->preq;
2365	pp->ssid_num = params->n_ssids;
2366	iwl_scan_build_ssids(params, pp->direct_scan, NULL);
2367}
2368
2369static void
2370iwl_mvm_scan_umac_fill_probe_p_v4(struct iwl_mvm_scan_params *params,
2371				  struct iwl_scan_probe_params_v4 *pp,
2372				  u32 *bitmap_ssid)
2373{
2374	pp->preq = params->preq;
2375	iwl_scan_build_ssids(params, pp->direct_scan, bitmap_ssid);
2376}
2377
2378static void
2379iwl_mvm_scan_umac_fill_ch_p_v4(struct iwl_mvm *mvm,
2380			       struct iwl_mvm_scan_params *params,
2381			       struct ieee80211_vif *vif,
2382			       struct iwl_scan_channel_params_v4 *cp,
2383			       u32 channel_cfg_flags)
2384{
2385	cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2386	cp->count = params->n_channels;
2387	cp->num_of_aps_override = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2388
2389	iwl_mvm_umac_scan_cfg_channels_v4(mvm, params->channels, cp,
2390					  params->n_channels,
2391					  channel_cfg_flags,
2392					  vif->type);
2393}
2394
2395static void
2396iwl_mvm_scan_umac_fill_ch_p_v7(struct iwl_mvm *mvm,
2397			       struct iwl_mvm_scan_params *params,
2398			       struct ieee80211_vif *vif,
2399			       struct iwl_scan_channel_params_v7 *cp,
2400			       u32 channel_cfg_flags,
2401			       u32 version)
2402{
2403	cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2404	cp->count = params->n_channels;
2405	cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2406	cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
2407
2408	iwl_mvm_umac_scan_cfg_channels_v7(mvm, params->channels, cp,
2409					  params->n_channels,
2410					  channel_cfg_flags,
2411					  vif->type, version);
2412
2413	if (params->enable_6ghz_passive) {
2414		struct ieee80211_supported_band *sband =
2415			&mvm->nvm_data->bands[NL80211_BAND_6GHZ];
2416		u32 i;
2417
2418		for (i = 0; i < sband->n_channels; i++) {
2419			struct ieee80211_channel *channel =
2420				&sband->channels[i];
2421
2422			struct iwl_scan_channel_cfg_umac *cfg =
2423				&cp->channel_config[cp->count];
2424
2425			if (!cfg80211_channel_is_psc(channel))
2426				continue;
2427
2428			cfg->v5.channel_num = channel->hw_value;
2429			cfg->v5.iter_count = 1;
2430			cfg->v5.iter_interval = 0;
2431
2432			if (version < 17) {
2433				cfg->flags = 0;
2434				cfg->v2.band = PHY_BAND_6;
2435			} else {
2436				cfg->flags = cpu_to_le32(PHY_BAND_6 <<
2437							 IWL_CHAN_CFG_FLAGS_BAND_POS);
2438				cfg->v5.psd_20 =
2439					IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
2440			}
2441			cp->count++;
2442		}
2443	}
2444}
2445
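/*
 * Version 12 UMAC scan request: general, periodic, probe (v3) and
 * channel (v4) parameter blocks, without any 6GHz-specific handling.
 */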
2446static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2447				 struct iwl_mvm_scan_params *params, int type,
2448				 int uid)
2449{
2450	struct iwl_scan_req_umac_v12 *cmd = mvm->scan_cmd;
2451	struct iwl_scan_req_params_v12 *scan_p = &cmd->scan_params;
2452	int ret;
2453	u16 gen_flags;
2454
2455	mvm->scan_uid_status[uid] = type;
2456
2457	cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2458	cmd->uid = cpu_to_le32(uid);
2459
2460	gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2461	iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
2462					     &scan_p->general_params,
2463					     gen_flags, 0, 12);
2464
2465	ret = iwl_mvm_fill_scan_sched_params(params,
2466					     scan_p->periodic_params.schedule,
2467					     &scan_p->periodic_params.delay);
2468	if (ret)
2469		return ret;
2470
2471	iwl_mvm_scan_umac_fill_probe_p_v3(params, &scan_p->probe_params);
2472	iwl_mvm_scan_umac_fill_ch_p_v4(mvm, params, vif,
2473				       &scan_p->channel_params, 0);
2474
2475	return 0;
2476}
2477
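/*
 * Command versions 14-17 share this builder: the regular path reuses the
 * v4 probe and v7 channel helpers, while the 6GHz path builds the
 * short-SSID/BSSID pools and derives the channel list from the collocated
 * AP parameters.
 */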
2478static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
2479					   struct ieee80211_vif *vif,
2480					   struct iwl_mvm_scan_params *params,
2481					   int type, int uid, u32 version)
2482{
2483	struct iwl_scan_req_umac_v17 *cmd = mvm->scan_cmd;
2484	struct iwl_scan_req_params_v17 *scan_p = &cmd->scan_params;
2485	struct iwl_scan_channel_params_v7 *cp = &scan_p->channel_params;
2486	struct iwl_scan_probe_params_v4 *pb = &scan_p->probe_params;
2487	int ret;
2488	u16 gen_flags;
2489	u8 gen_flags2;
2490	u32 bitmap_ssid = 0;
2491
2492	mvm->scan_uid_status[uid] = type;
2493
2494	cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
2495	cmd->uid = cpu_to_le32(uid);
2496
2497	gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
2498
2499	if (version >= 15)
2500		gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
2501	else
2502		gen_flags2 = 0;
2503
2504	iwl_mvm_scan_umac_fill_general_p_v12(mvm, params, vif,
2505					     &scan_p->general_params,
2506					     gen_flags, gen_flags2, version);
2507
2508	ret = iwl_mvm_fill_scan_sched_params(params,
2509					     scan_p->periodic_params.schedule,
2510					     &scan_p->periodic_params.delay);
2511	if (ret)
2512		return ret;
2513
2514	if (!params->scan_6ghz) {
2515		iwl_mvm_scan_umac_fill_probe_p_v4(params,
2516						  &scan_p->probe_params,
2517						  &bitmap_ssid);
2518		iwl_mvm_scan_umac_fill_ch_p_v7(mvm, params, vif,
2519					       &scan_p->channel_params,
2520					       bitmap_ssid,
2521					       version);
2522		return 0;
2523	} else {
2524		pb->preq = params->preq;
2525	}
2526
2527	cp->flags = iwl_mvm_scan_umac_chan_flags_v2(mvm, params, vif);
2528	cp->n_aps_override[0] = IWL_SCAN_ADWELL_N_APS_GO_FRIENDLY;
2529	cp->n_aps_override[1] = IWL_SCAN_ADWELL_N_APS_SOCIAL_CHS;
2530
2531	iwl_mvm_umac_scan_fill_6g_chan_list(mvm, params, pb);
2532
2533	cp->count = iwl_mvm_umac_scan_cfg_channels_v7_6g(mvm, params,
2534							 params->n_channels,
2535							 pb, cp, vif->type,
2536							 version);
2537	if (!cp->count) {
2538		mvm->scan_uid_status[uid] = 0;
2539		return -EINVAL;
2540	}
2541
2542	if (!params->n_ssids ||
2543	    (params->n_ssids == 1 && !params->ssids[0].ssid_len))
2544		cp->flags |= IWL_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER;
2545
2546	return 0;
2547}
2548
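/*
 * Thin wrappers for the version handler table below; they only pin the
 * command version passed to iwl_mvm_scan_umac_v14_and_above().
 */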
2549static int iwl_mvm_scan_umac_v14(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2550				 struct iwl_mvm_scan_params *params, int type,
2551				 int uid)
2552{
2553	return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 14);
2554}
2555
2556static int iwl_mvm_scan_umac_v15(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2557				 struct iwl_mvm_scan_params *params, int type,
2558				 int uid)
2559{
2560	return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 15);
2561}
2562
2563static int iwl_mvm_scan_umac_v16(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2564				 struct iwl_mvm_scan_params *params, int type,
2565				 int uid)
2566{
2567	return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 16);
2568}
2569
2570static int iwl_mvm_scan_umac_v17(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2571				 struct iwl_mvm_scan_params *params, int type,
2572				 int uid)
2573{
2574	return iwl_mvm_scan_umac_v14_and_above(mvm, vif, params, type, uid, 17);
2575}
2576
2577static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
2578{
2579	return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
2580}
2581
2582static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
2583{
2584	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
2585					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
2586
2587	/* This looks a bit arbitrary, but the idea is that if we run
2588	 * out of possible simultaneous scans and the userspace is
2589	 * trying to run a scan type that is already running, we
2590	 * return -EBUSY.  But if the userspace wants to start a
2591	 * different type of scan, we stop the opposite type to make
2592	 * space for the new request.  The reason is backwards
2593	 * compatibility with old wpa_supplicant that wouldn't stop a
2594	 * scheduled scan before starting a normal scan.
2595	 */
2596
2597	/* FW supports only a single periodic scan */
2598	if ((type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) &&
2599	    mvm->scan_status & (IWL_MVM_SCAN_SCHED | IWL_MVM_SCAN_NETDETECT))
2600		return -EBUSY;
2601
2602	if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
2603		return 0;
2604
2605	/* Use a switch, even though this is a bitmask, so that more
2606	 * than one bit set will fall into the default case and we will warn.
2607	 */
2608	switch (type) {
2609	case IWL_MVM_SCAN_REGULAR:
2610		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2611			return -EBUSY;
2612		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
2613	case IWL_MVM_SCAN_SCHED:
2614		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2615			return -EBUSY;
2616		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
2617	case IWL_MVM_SCAN_NETDETECT:
2618		/* For non-unified images, there's no need to stop
2619		 * anything for net-detect since the firmware is
2620		 * restarted anyway.  This way, any sched scans that
2621		 * were running will be restarted when we resume.
2622		 */
2623		if (!unified_image)
2624			return 0;
2625
2626		/* If this is a unified image and we ran out of scans,
2627		 * we need to stop something.  Prefer stopping regular
2628		 * scans, because the results are useless at this
2629		 * point, and we should be able to keep running
2630		 * another scheduled scan while suspended.
2631		 */
2632		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
2633			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
2634						 true);
2635		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
2636			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
2637						 true);
2638		/* Something is wrong if no scan was running but we
2639		 * ran out of scans.
2640		 */
2641		fallthrough;
2642	default:
2643		WARN_ON(1);
2644		break;
2645	}
2646
2647	return -EIO;
2648}
2649
2650#define SCAN_TIMEOUT 30000
2651
2652void iwl_mvm_scan_timeout_wk(struct work_struct *work)
2653{
2654	struct delayed_work *delayed_work = to_delayed_work(work);
2655	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
2656					   scan_timeout_dwork);
2657
2658	IWL_ERR(mvm, "regular scan timed out\n");
2659
2660	iwl_force_nmi(mvm->trans);
2661}
2662
2663static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
2664				   struct iwl_mvm_scan_params *params,
2665				   struct ieee80211_vif *vif)
2666{
2667	if (iwl_mvm_is_cdb_supported(mvm)) {
2668		params->type =
2669			iwl_mvm_get_scan_type_band(mvm, vif,
2670						   NL80211_BAND_2GHZ);
2671		params->hb_type =
2672			iwl_mvm_get_scan_type_band(mvm, vif,
2673						   NL80211_BAND_5GHZ);
2674	} else {
2675		params->type = iwl_mvm_get_scan_type(mvm, vif);
2676	}
2677}
2678
2679struct iwl_scan_umac_handler {
2680	u8 version;
2681	int (*handler)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2682		       struct iwl_mvm_scan_params *params, int type, int uid);
2683};
2684
2685#define IWL_SCAN_UMAC_HANDLER(_ver) {		\
2686	.version = _ver,			\
2687	.handler = iwl_mvm_scan_umac_v##_ver,	\
2688}
2689
2690static const struct iwl_scan_umac_handler iwl_scan_umac_handlers[] = {
2691	/* set the newest version first to shorten the list traversal time */
2692	IWL_SCAN_UMAC_HANDLER(17),
2693	IWL_SCAN_UMAC_HANDLER(16),
2694	IWL_SCAN_UMAC_HANDLER(15),
2695	IWL_SCAN_UMAC_HANDLER(14),
2696	IWL_SCAN_UMAC_HANDLER(12),
2697};
2698
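/*
 * Deferred work for the CSME/MEI scan filter: drop queued scan results
 * whose BSSID does not match the CSME connection and hand matching
 * frames to mac80211.
 */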
2699static void iwl_mvm_mei_scan_work(struct work_struct *wk)
2700{
2701	struct iwl_mei_scan_filter *scan_filter =
2702		container_of(wk, struct iwl_mei_scan_filter, scan_work);
2703	struct iwl_mvm *mvm =
2704		container_of(scan_filter, struct iwl_mvm, mei_scan_filter);
2705	struct iwl_mvm_csme_conn_info *info;
2706	struct sk_buff *skb;
2707	u8 bssid[ETH_ALEN];
2708
2709	mutex_lock(&mvm->mutex);
2710	info = iwl_mvm_get_csme_conn_info(mvm);
2711	memcpy(bssid, info->conn_info.bssid, ETH_ALEN);
2712	mutex_unlock(&mvm->mutex);
2713
2714	while ((skb = skb_dequeue(&scan_filter->scan_res))) {
2715		struct ieee80211_mgmt *mgmt = (void *)skb->data;
2716
2717		if (!memcmp(mgmt->bssid, bssid, ETH_ALEN))
2718			ieee80211_rx_irqsafe(mvm->hw, skb);
2719		else
2720			kfree_skb(skb);
2721	}
2722}
2723
2724void iwl_mvm_mei_scan_filter_init(struct iwl_mei_scan_filter *mei_scan_filter)
2725{
2726	skb_queue_head_init(&mei_scan_filter->scan_res);
2727	INIT_WORK(&mei_scan_filter->scan_work, iwl_mvm_mei_scan_work);
2728}
2729
2730/* In case CSME is connected and has link protection set, this function will
2731 * override the scan request to scan only the associated channel and only for
2732 * the associated SSID.
2733 */
2734static void iwl_mvm_mei_limited_scan(struct iwl_mvm *mvm,
2735				     struct iwl_mvm_scan_params *params)
2736{
2737	struct iwl_mvm_csme_conn_info *info = iwl_mvm_get_csme_conn_info(mvm);
2738	struct iwl_mei_conn_info *conn_info;
2739	struct ieee80211_channel *chan;
2740	int scan_iters, i;
2741
2742	if (!info) {
2743		IWL_DEBUG_SCAN(mvm, "mei_limited_scan: no connection info\n");
2744		return;
2745	}
2746
2747	conn_info = &info->conn_info;
2748	if (!info->conn_info.lp_state || !info->conn_info.ssid_len)
2749		return;
2750
2751	if (!params->n_channels || !params->n_ssids)
2752		return;
2753
2754	mvm->mei_scan_filter.is_mei_limited_scan = true;
2755
2756	chan = ieee80211_get_channel(mvm->hw->wiphy,
2757				     ieee80211_channel_to_frequency(conn_info->channel,
2758								    conn_info->band));
2759	if (!chan) {
2760		IWL_DEBUG_SCAN(mvm,
2761			       "Failed to get CSME channel (chan=%u band=%u)\n",
2762			       conn_info->channel, conn_info->band);
2763		return;
2764	}
2765
2766	/* The mei filtered scan must find the AP, otherwise CSME will
2767	 * take the NIC ownership. Add several iterations on the channel to
2768	 * make the scan more robust.
2769	 */
2770	scan_iters = min(IWL_MEI_SCAN_NUM_ITER, params->n_channels);
2771	params->n_channels = scan_iters;
2772	for (i = 0; i < scan_iters; i++)
2773		params->channels[i] = chan;
2774
2775	IWL_DEBUG_SCAN(mvm, "Mei scan: num iterations=%u\n", scan_iters);
2776
2777	params->n_ssids = 1;
2778	params->ssids[0].ssid_len = conn_info->ssid_len;
2779	memcpy(params->ssids[0].ssid, conn_info->ssid, conn_info->ssid_len);
2780}
2781
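/*
 * Build the scan command matching the firmware: LMAC when UMAC scan is
 * not supported, otherwise dispatch on the SCAN_REQ_UMAC command version
 * via the handler table, falling back to the legacy UMAC builder.
 */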
2782static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
2783				  struct ieee80211_vif *vif,
2784				  struct iwl_host_cmd *hcmd,
2785				  struct iwl_mvm_scan_params *params,
2786				  int type)
2787{
2788	int uid, i, err;
2789	u8 scan_ver;
2790
2791	lockdep_assert_held(&mvm->mutex);
2792	memset(mvm->scan_cmd, 0, mvm->scan_cmd_size);
2793
2794	iwl_mvm_mei_limited_scan(mvm, params);
2795
2796	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
2797		hcmd->id = SCAN_OFFLOAD_REQUEST_CMD;
2798
2799		return iwl_mvm_scan_lmac(mvm, vif, params);
2800	}
2801
2802	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
2803	if (uid < 0)
2804		return uid;
2805
2806	hcmd->id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_REQ_UMAC);
2807
2808	scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
2809					 IWL_FW_CMD_VER_UNKNOWN);
2810
2811	for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
2812		const struct iwl_scan_umac_handler *ver_handler =
2813			&iwl_scan_umac_handlers[i];
2814
2815		if (ver_handler->version != scan_ver)
2816			continue;
2817
2818		return ver_handler->handler(mvm, vif, params, type, uid) ? : uid;
2819	}
2820
2821	err = iwl_mvm_scan_umac(mvm, vif, params, type, uid);
2822	if (err)
2823		return err;
2824
2825	return uid;
2826}
2827
2828struct iwl_mvm_scan_respect_p2p_go_iter_data {
2829	struct ieee80211_vif *current_vif;
2830	bool p2p_go;
2831	enum nl80211_band band;
2832};
2833
2834static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
2835					     struct ieee80211_vif *vif)
2836{
2837	struct iwl_mvm_scan_respect_p2p_go_iter_data *data = _data;
2838	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2839
2840	/* exclude the given vif */
2841	if (vif == data->current_vif)
2842		return;
2843
2844	if (vif->type == NL80211_IFTYPE_AP && vif->p2p) {
2845		u32 link_id;
2846
2847		for (link_id = 0;
2848		     link_id < ARRAY_SIZE(mvmvif->link);
2849		     link_id++) {
2850			struct iwl_mvm_vif_link_info *link =
2851				mvmvif->link[link_id];
2852
2853			if (link && link->phy_ctxt->id < NUM_PHY_CTX &&
2854			    (data->band == NUM_NL80211_BANDS ||
2855			     link->phy_ctxt->channel->band == data->band)) {
2856				data->p2p_go = true;
2857				break;
2858			}
2859		}
2860	}
2861}
2862
2863static bool _iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
2864					struct ieee80211_vif *vif,
2865					bool low_latency,
2866					enum nl80211_band band)
2867{
2868	struct iwl_mvm_scan_respect_p2p_go_iter_data data = {
2869		.current_vif = vif,
2870		.p2p_go = false,
2871		.band = band,
2872	};
2873
2874	if (!low_latency)
2875		return false;
2876
2877	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
2878						   IEEE80211_IFACE_ITER_NORMAL,
2879						   iwl_mvm_scan_respect_p2p_go_iter,
2880						   &data);
2881
2882	return data.p2p_go;
2883}
2884
2885static bool iwl_mvm_get_respect_p2p_go_band(struct iwl_mvm *mvm,
2886					    struct ieee80211_vif *vif,
2887					    enum nl80211_band band)
2888{
2889	bool low_latency = iwl_mvm_low_latency_band(mvm, band);
2890
2891	return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency, band);
2892}
2893
2894static bool iwl_mvm_get_respect_p2p_go(struct iwl_mvm *mvm,
2895				       struct ieee80211_vif *vif)
2896{
2897	bool low_latency = iwl_mvm_low_latency(mvm);
2898
2899	return _iwl_mvm_get_respect_p2p_go(mvm, vif, low_latency,
2900					   NUM_NL80211_BANDS);
2901}
2902
2903static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
2904					struct iwl_mvm_scan_params *params,
2905					struct ieee80211_vif *vif)
2906{
2907	if (iwl_mvm_is_cdb_supported(mvm)) {
2908		params->respect_p2p_go =
2909			iwl_mvm_get_respect_p2p_go_band(mvm, vif,
2910							NL80211_BAND_2GHZ);
2911		params->respect_p2p_go_hb =
2912			iwl_mvm_get_respect_p2p_go_band(mvm, vif,
2913							NL80211_BAND_5GHZ);
2914	} else {
2915		params->respect_p2p_go = iwl_mvm_get_respect_p2p_go(mvm, vif);
2916	}
2917}
2918
2919int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2920			   struct cfg80211_scan_request *req,
2921			   struct ieee80211_scan_ies *ies)
2922{
2923	struct iwl_host_cmd hcmd = {
2924		.len = { iwl_mvm_scan_size(mvm), },
2925		.data = { mvm->scan_cmd, },
2926		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
2927	};
2928	struct iwl_mvm_scan_params params = {};
2929	int ret, uid;
2930	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
2931
2932	lockdep_assert_held(&mvm->mutex);
2933
2934	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
2935		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
2936		return -EBUSY;
2937	}
2938
2939	ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
2940	if (ret)
2941		return ret;
2942
2943	/* we should have failed registration if scan_cmd was NULL */
2944	if (WARN_ON(!mvm->scan_cmd))
2945		return -ENOMEM;
2946
2947	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
2948		return -ENOBUFS;
2949
2950	params.n_ssids = req->n_ssids;
2951	params.flags = req->flags;
2952	params.n_channels = req->n_channels;
2953	params.delay = 0;
2954	params.ssids = req->ssids;
2955	params.channels = req->channels;
2956	params.mac_addr = req->mac_addr;
2957	params.mac_addr_mask = req->mac_addr_mask;
2958	params.no_cck = req->no_cck;
2959	params.pass_all = true;
2960	params.n_match_sets = 0;
2961	params.match_sets = NULL;
2962	ether_addr_copy(params.bssid, req->bssid);
2963
2964	params.scan_plans = &scan_plan;
2965	params.n_scan_plans = 1;
2966
2967	params.n_6ghz_params = req->n_6ghz_params;
2968	params.scan_6ghz_params = req->scan_6ghz_params;
2969	params.scan_6ghz = req->scan_6ghz;
2970	iwl_mvm_fill_scan_type(mvm, &params, vif);
2971	iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
2972
2973	if (req->duration)
2974		params.iter_notif = true;
2975
2976	params.tsf_report_link_id = req->tsf_report_link_id;
2977	if (params.tsf_report_link_id < 0) {
2978		if (vif->active_links)
2979			params.tsf_report_link_id = __ffs(vif->active_links);
2980		else
2981			params.tsf_report_link_id = 0;
2982	}
2983
2984	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
2985
2986	iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
2987
2988	uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
2989				     IWL_MVM_SCAN_REGULAR);
2990
2991	if (uid < 0)
2992		return uid;
2993
2994	iwl_mvm_pause_tcm(mvm, false);
2995
2996	ret = iwl_mvm_send_cmd(mvm, &hcmd);
2997	if (ret) {
2998		/* If the scan failed, it usually means that the FW was unable
2999		 * to allocate the time events. Warn on it, but maybe we
3000		 * should try to send the command again with different params.
3001		 */
3002		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
3003		iwl_mvm_resume_tcm(mvm);
3004		mvm->scan_uid_status[uid] = 0;
3005		return ret;
3006	}
3007
3008	IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
3009	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
3010	mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
3011
3012	if (params.enable_6ghz_passive)
3013		mvm->last_6ghz_passive_scan_jiffies = jiffies;
3014
3015	schedule_delayed_work(&mvm->scan_timeout_dwork,
3016			      msecs_to_jiffies(SCAN_TIMEOUT));
3017
3018	return 0;
3019}
3020
3021int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
3022			     struct ieee80211_vif *vif,
3023			     struct cfg80211_sched_scan_request *req,
3024			     struct ieee80211_scan_ies *ies,
3025			     int type)
3026{
3027	struct iwl_host_cmd hcmd = {
3028		.len = { iwl_mvm_scan_size(mvm), },
3029		.data = { mvm->scan_cmd, },
3030		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
3031	};
3032	struct iwl_mvm_scan_params params = {};
3033	int ret, uid;
3034	int i, j;
3035	bool non_psc_included = false;
3036
3037	lockdep_assert_held(&mvm->mutex);
3038
3039	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
3040		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
3041		return -EBUSY;
3042	}
3043
3044	ret = iwl_mvm_check_running_scans(mvm, type);
3045	if (ret)
3046		return ret;
3047
3048	/* we should have failed registration if scan_cmd was NULL */
3049	if (WARN_ON(!mvm->scan_cmd))
3050		return -ENOMEM;
3051
3052
3053	params.n_ssids = req->n_ssids;
3054	params.flags = req->flags;
3055	params.n_channels = req->n_channels;
3056	params.ssids = req->ssids;
3057	params.channels = req->channels;
3058	params.mac_addr = req->mac_addr;
3059	params.mac_addr_mask = req->mac_addr_mask;
3060	params.no_cck = false;
3061	params.pass_all =  iwl_mvm_scan_pass_all(mvm, req);
3062	params.n_match_sets = req->n_match_sets;
3063	params.match_sets = req->match_sets;
3064	eth_broadcast_addr(params.bssid);
3065	if (!req->n_scan_plans)
3066		return -EINVAL;
3067
3068	params.n_scan_plans = req->n_scan_plans;
3069	params.scan_plans = req->scan_plans;
3070
3071	iwl_mvm_fill_scan_type(mvm, &params, vif);
3072	iwl_mvm_fill_respect_p2p_go(mvm, &params, vif);
3073
3074	/* In theory, LMAC scans can handle a 32-bit delay, but since
3075	 * waiting for over 18 hours to start the scan is a bit silly
3076	 * and to keep it aligned with UMAC scans (which only support
3077	 * 16-bit delays), trim it down to 16-bits.
3078	 */
3079	if (req->delay > U16_MAX) {
3080		IWL_DEBUG_SCAN(mvm,
3081			       "delay value is > 16-bits, set to max possible\n");
3082		params.delay = U16_MAX;
3083	} else {
3084		params.delay = req->delay;
3085	}
3086
3087	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
3088	if (ret)
3089		return ret;
3090
3091	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
3092
3093	/* for the 6 GHz band only PSC channels need to be added */
3094	for (i = 0; i < params.n_channels; i++) {
3095		struct ieee80211_channel *channel = params.channels[i];
3096
3097		if (channel->band == NL80211_BAND_6GHZ &&
3098		    !cfg80211_channel_is_psc(channel)) {
3099			non_psc_included = true;
3100			break;
3101		}
3102	}
3103
3104	if (non_psc_included) {
3105		params.channels = kmemdup(params.channels,
3106					  sizeof(params.channels[0]) *
3107					  params.n_channels,
3108					  GFP_KERNEL);
3109		if (!params.channels)
3110			return -ENOMEM;
3111
3112		for (i = j = 0; i < params.n_channels; i++) {
3113			if (params.channels[i]->band == NL80211_BAND_6GHZ &&
3114			    !cfg80211_channel_is_psc(params.channels[i]))
3115				continue;
3116			params.channels[j++] = params.channels[i];
3117		}
3118		params.n_channels = j;
3119	}
3120
3121	if (non_psc_included &&
3122	    !iwl_mvm_scan_fits(mvm, req->n_ssids, ies, params.n_channels)) {
3123		kfree(params.channels);
3124		return -ENOBUFS;
3125	}
3126
3127	uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
3128
3129	if (non_psc_included)
3130		kfree(params.channels);
3131	if (uid < 0)
3132		return uid;
3133
3134	ret = iwl_mvm_send_cmd(mvm, &hcmd);
3135	if (!ret) {
3136		IWL_DEBUG_SCAN(mvm,
3137			       "Sched scan request was sent successfully\n");
3138		mvm->scan_status |= type;
3139	} else {
3140		/* If the scan failed, it usually means that the FW was unable
3141		 * to allocate the time events. Warn on it, but maybe we
3142		 * should try to send the command again with different params.
3143		 */
3144		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
3145		mvm->scan_uid_status[uid] = 0;
3146		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3147	}
3148
3149	return ret;
3150}
3151
3152void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
3153					 struct iwl_rx_cmd_buffer *rxb)
3154{
3155	struct iwl_rx_packet *pkt = rxb_addr(rxb);
3156	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
3157	u32 uid = __le32_to_cpu(notif->uid);
3158	bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
3159
3160	mvm->mei_scan_filter.is_mei_limited_scan = false;
3161
3162	if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
3163		return;
3164
3165	/* if the scan is already stopping, we don't need to notify mac80211 */
3166	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
3167		struct cfg80211_scan_info info = {
3168			.aborted = aborted,
3169			.scan_start_tsf = mvm->scan_start,
3170		};
3171		struct iwl_mvm_vif *scan_vif = mvm->scan_vif;
3172		struct iwl_mvm_vif_link_info *link_info =
3173			scan_vif->link[mvm->scan_link_id];
3174
3175		if (!WARN_ON(!link_info))
3176			memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
3177
3178		ieee80211_scan_completed(mvm->hw, &info);
3179		mvm->scan_vif = NULL;
3180		cancel_delayed_work(&mvm->scan_timeout_dwork);
3181		iwl_mvm_resume_tcm(mvm);
3182	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
3183		ieee80211_sched_scan_stopped(mvm->hw);
3184		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3185	}
3186
3187	mvm->scan_status &= ~mvm->scan_uid_status[uid];
3188	IWL_DEBUG_SCAN(mvm,
3189		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
3190		       uid, mvm->scan_uid_status[uid],
3191		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
3192				"completed" : "aborted",
3193		       iwl_mvm_ebs_status_str(notif->ebs_status));
3194	IWL_DEBUG_SCAN(mvm,
3195		       "Last line %d, Last iteration %d, Time from last iteration %d\n",
3196		       notif->last_schedule, notif->last_iter,
3197		       __le32_to_cpu(notif->time_from_last_iter));
3198
3199	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
3200	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
3201		mvm->last_ebs_successful = false;
3202
3203	mvm->scan_uid_status[uid] = 0;
3204}
3205
3206void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
3207					      struct iwl_rx_cmd_buffer *rxb)
3208{
3209	struct iwl_rx_packet *pkt = rxb_addr(rxb);
3210	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
3211
3212	mvm->scan_start = le64_to_cpu(notif->start_tsf);
3213
3214	IWL_DEBUG_SCAN(mvm,
3215		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
3216		       notif->status, notif->scanned_channels);
3217
3218	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
3219		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
3220		ieee80211_sched_scan_results(mvm->hw);
3221		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
3222	}
3223
3224	IWL_DEBUG_SCAN(mvm,
3225		       "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
3226		       mvm->scan_start);
3227}
3228
3229static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
3230{
3231	struct iwl_umac_scan_abort cmd = {};
3232	int uid, ret;
3233
3234	lockdep_assert_held(&mvm->mutex);
3235
3236	/* We should always get a valid index here, because we already
3237	 * checked that this type of scan was running in the generic
3238	 * code.
3239	 */
3240	uid = iwl_mvm_scan_uid_by_status(mvm, type);
3241	if (WARN_ON_ONCE(uid < 0))
3242		return uid;
3243
3244	cmd.uid = cpu_to_le32(uid);
3245
3246	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
3247
3248	ret = iwl_mvm_send_cmd_pdu(mvm,
3249				   WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
3250				   0, sizeof(cmd), &cmd);
3251	if (!ret)
3252		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
3253
3254	return ret;
3255}
3256
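/*
 * Abort the given scan type and wait (up to one second) for the firmware
 * scan-complete notification before returning.
 */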
3257static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
3258{
3259	struct iwl_notification_wait wait_scan_done;
3260	static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
3261					      SCAN_OFFLOAD_COMPLETE, };
3262	int ret;
3263
3264	lockdep_assert_held(&mvm->mutex);
3265
3266	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
3267				   scan_done_notif,
3268				   ARRAY_SIZE(scan_done_notif),
3269				   NULL, NULL);
3270
3271	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
3272
3273	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
3274		ret = iwl_mvm_umac_scan_abort(mvm, type);
3275	else
3276		ret = iwl_mvm_lmac_scan_abort(mvm);
3277
3278	if (ret) {
3279		IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
3280		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
3281		return ret;
3282	}
3283
3284	return iwl_wait_notification(&mvm->notif_wait, &wait_scan_done,
3285				     1 * HZ);
3286}
3287
3288static size_t iwl_scan_req_umac_get_size(u8 scan_ver)
3289{
3290	switch (scan_ver) {
3291	case 12:
3292		return sizeof(struct iwl_scan_req_umac_v12);
3293	case 14:
3294	case 15:
3295	case 16:
3296	case 17:
3297		return sizeof(struct iwl_scan_req_umac_v17);
3298	}
3299
3300	return 0;
3301}
3302
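/*
 * Worst-case scan command size: a fixed per-version struct size for v12+
 * commands; otherwise the base struct plus one channel config entry per
 * supported scan channel plus the tail, or the equivalent LMAC structures
 * when UMAC scan is not supported.
 */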
3303size_t iwl_mvm_scan_size(struct iwl_mvm *mvm)
3304{
3305	int base_size, tail_size;
3306	u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC,
3307					    IWL_FW_CMD_VER_UNKNOWN);
3308
3309	base_size = iwl_scan_req_umac_get_size(scan_ver);
3310	if (base_size)
3311		return base_size;
3312
3313
3314	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
3315		base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
3316	else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
3317		base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
3318	else if (iwl_mvm_cdb_scan_api(mvm))
3319		base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
3320	else
3321		base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
3322
3323	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
3324		if (iwl_mvm_is_scan_ext_chan_supported(mvm))
3325			tail_size = sizeof(struct iwl_scan_req_umac_tail_v2);
3326		else
3327			tail_size = sizeof(struct iwl_scan_req_umac_tail_v1);
3328
3329		return base_size +
3330			sizeof(struct iwl_scan_channel_cfg_umac) *
3331				mvm->fw->ucode_capa.n_scan_channels +
3332			tail_size;
3333	}
3334	return sizeof(struct iwl_scan_req_lmac) +
3335		sizeof(struct iwl_scan_channel_cfg_lmac) *
3336		mvm->fw->ucode_capa.n_scan_channels +
3337		sizeof(struct iwl_scan_probe_req_v1);
3338}
3339
3340/*
3341 * This function is used in the NIC restart flow, to inform mac80211 about scans
3342 * that were aborted by the restart flow or by an assert.
3343 */
3344void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
3345{
3346	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
3347		int uid, i;
3348
3349		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
3350		if (uid >= 0) {
3351			struct cfg80211_scan_info info = {
3352				.aborted = true,
3353			};
3354
3355			cancel_delayed_work(&mvm->scan_timeout_dwork);
3356
3357			ieee80211_scan_completed(mvm->hw, &info);
3358			mvm->scan_uid_status[uid] = 0;
3359		}
3360		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
3361		if (uid >= 0) {
3362			/* Sched scan will be restarted by mac80211 in
3363			 * restart_hw, so do not report if FW is about to be
3364			 * restarted.
3365			 */
3366			if (!mvm->fw_restart)
3367				ieee80211_sched_scan_stopped(mvm->hw);
3368			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3369			mvm->scan_uid_status[uid] = 0;
3370		}
3371		uid = iwl_mvm_scan_uid_by_status(mvm,
3372						 IWL_MVM_SCAN_STOPPING_REGULAR);
3373		if (uid >= 0)
3374			mvm->scan_uid_status[uid] = 0;
3375
3376		uid = iwl_mvm_scan_uid_by_status(mvm,
3377						 IWL_MVM_SCAN_STOPPING_SCHED);
3378		if (uid >= 0)
3379			mvm->scan_uid_status[uid] = 0;
3380
3381		/* We shouldn't have any UIDs still set.  Loop over all the
3382		 * UIDs to make sure there's nothing left there and warn if
3383		 * any is found.
3384		 */
3385		for (i = 0; i < mvm->max_scans; i++) {
3386			if (WARN_ONCE(mvm->scan_uid_status[i],
3387				      "UMAC scan UID %d status was not cleaned\n",
3388				      i))
3389				mvm->scan_uid_status[i] = 0;
3390		}
3391	} else {
3392		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
3393			struct cfg80211_scan_info info = {
3394				.aborted = true,
3395			};
3396
3397			cancel_delayed_work(&mvm->scan_timeout_dwork);
3398			ieee80211_scan_completed(mvm->hw, &info);
3399		}
3400
3401		/* Sched scan will be restarted by mac80211 in
3402		 * restart_hw, so do not report if FW is about to be
3403		 * restarted.
3404		 */
3405		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
3406		    !mvm->fw_restart) {
3407			ieee80211_sched_scan_stopped(mvm->hw);
3408			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3409		}
3410	}
3411}
3412
3413int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
3414{
3415	int ret;
3416
3417	if (!(mvm->scan_status & type))
3418		return 0;
3419
3420	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
3421		ret = 0;
3422		goto out;
3423	}
3424
3425	ret = iwl_mvm_scan_stop_wait(mvm, type);
3426	if (!ret)
3427		mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
3428out:
3429	/* Clear the scan status so that the next scan requests will
3430	 * succeed, and mark the scan as stopping so that the Rx
3431	 * handler doesn't do anything, as the scan was stopped from
3432	 * above.
3433	 */
3434	mvm->scan_status &= ~type;
3435
3436	if (type == IWL_MVM_SCAN_REGULAR) {
3437		cancel_delayed_work(&mvm->scan_timeout_dwork);
3438		if (notify) {
3439			struct cfg80211_scan_info info = {
3440				.aborted = true,
3441			};
3442
3443			ieee80211_scan_completed(mvm->hw, &info);
3444		}
3445	} else if (notify) {
3446		ieee80211_sched_scan_stopped(mvm->hw);
3447		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
3448	}
3449
3450	return ret;
3451}
v4.10.11
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
   9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10 * Copyright(c) 2016 Intel Deutschland GmbH
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of version 2 of the GNU General Public License as
  14 * published by the Free Software Foundation.
  15 *
  16 * This program is distributed in the hope that it will be useful, but
  17 * WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  19 * General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this program; if not, write to the Free Software
  23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  24 * USA
  25 *
  26 * The full GNU General Public License is included in this distribution
  27 * in the file called COPYING.
  28 *
  29 * Contact Information:
  30 *  Intel Linux Wireless <linuxwifi@intel.com>
  31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  32 *
  33 * BSD LICENSE
  34 *
  35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  37 * Copyright(c) 2016 Intel Deutschland GmbH
  38 * All rights reserved.
  39 *
  40 * Redistribution and use in source and binary forms, with or without
  41 * modification, are permitted provided that the following conditions
  42 * are met:
  43 *
  44 *  * Redistributions of source code must retain the above copyright
  45 *    notice, this list of conditions and the following disclaimer.
  46 *  * Redistributions in binary form must reproduce the above copyright
  47 *    notice, this list of conditions and the following disclaimer in
  48 *    the documentation and/or other materials provided with the
  49 *    distribution.
  50 *  * Neither the name Intel Corporation nor the names of its
  51 *    contributors may be used to endorse or promote products derived
  52 *    from this software without specific prior written permission.
  53 *
  54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  65 *
  66 *****************************************************************************/
  67
  68#include <linux/etherdevice.h>
  69#include <net/mac80211.h>
 
  70
  71#include "mvm.h"
  72#include "fw-api-scan.h"
  73#include "iwl-io.h"
  74
  75#define IWL_DENSE_EBS_SCAN_RATIO 5
  76#define IWL_SPARSE_EBS_SCAN_RATIO 1
  77
  78enum iwl_mvm_traffic_load {
  79	IWL_MVM_TRAFFIC_LOW,
  80	IWL_MVM_TRAFFIC_MEDIUM,
  81	IWL_MVM_TRAFFIC_HIGH,
  82};
  83
  84struct iwl_mvm_scan_timing_params {
  85	u32 dwell_active;
  86	u32 dwell_passive;
  87	u32 dwell_fragmented;
  88	u32 dwell_extended;
  89	u32 suspend_time;
  90	u32 max_out_time;
  91};
  92
  93static struct iwl_mvm_scan_timing_params scan_timing[] = {
  94	[IWL_SCAN_TYPE_UNASSOC] = {
  95		.dwell_active = 10,
  96		.dwell_passive = 110,
  97		.dwell_fragmented = 44,
  98		.dwell_extended = 90,
  99		.suspend_time = 0,
 100		.max_out_time = 0,
 101	},
 102	[IWL_SCAN_TYPE_WILD] = {
 103		.dwell_active = 10,
 104		.dwell_passive = 110,
 105		.dwell_fragmented = 44,
 106		.dwell_extended = 90,
 107		.suspend_time = 30,
 108		.max_out_time = 120,
 109	},
 110	[IWL_SCAN_TYPE_MILD] = {
 111		.dwell_active = 10,
 112		.dwell_passive = 110,
 113		.dwell_fragmented = 44,
 114		.dwell_extended = 90,
 115		.suspend_time = 120,
 116		.max_out_time = 120,
 117	},
 118	[IWL_SCAN_TYPE_FRAGMENTED] = {
 119		.dwell_active = 10,
 120		.dwell_passive = 110,
 121		.dwell_fragmented = 44,
 122		.suspend_time = 95,
 123		.max_out_time = 44,
 124	},
 
 
 
 
 125};
 126
 127struct iwl_mvm_scan_params {
 
 128	enum iwl_mvm_scan_type type;
 
 129	u32 n_channels;
 130	u16 delay;
 131	int n_ssids;
 132	struct cfg80211_ssid *ssids;
 133	struct ieee80211_channel **channels;
 134	u32 flags;
 135	u8 *mac_addr;
 136	u8 *mac_addr_mask;
 137	bool no_cck;
 138	bool pass_all;
 139	int n_match_sets;
 140	struct iwl_scan_probe_req preq;
 141	struct cfg80211_match_set *match_sets;
 142	int n_scan_plans;
 143	struct cfg80211_sched_scan_plan *scan_plans;
 144	u32 measurement_dwell;
 145};
 146
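/* Return the RX antenna mask to use while scanning: the override in
 * mvm->scan_rx_ant when one is set, otherwise all valid RX antennas.
 */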
 147static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
 148{
 149	if (mvm->scan_rx_ant != ANT_NONE)
 150		return mvm->scan_rx_ant;
 151	return iwl_mvm_get_valid_rx_ant(mvm);
 152}
 153
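/* Build the RX chain selection field for the scan command, forcing the
 * chosen RX antennas for both the plain and MIMO selections.
 */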
 154static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
 155{
 156	u16 rx_chain;
 157	u8 rx_ant;
 158
 159	rx_ant = iwl_mvm_scan_rx_ant(mvm);
 160	rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
 161	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
 162	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
 163	rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
 164	return cpu_to_le16(rx_chain);
 165}
 166
 167static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
 168{
 169	if (band == NL80211_BAND_2GHZ)
 170		return cpu_to_le32(PHY_BAND_24);
 171	else
 172		return cpu_to_le32(PHY_BAND_5);
 173}
 174
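/* Pick the TX antenna for probe requests in a round-robin fashion and return
 * the rate_n_flags to use: 1 Mbps CCK on 2.4 GHz (unless CCK is disallowed),
 * 6 Mbps OFDM otherwise.
 */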
 175static inline __le32
 176iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
 177			  bool no_cck)
 178{
 179	u32 tx_ant;
 180
 181	mvm->scan_last_antenna_idx =
 182		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
 183				     mvm->scan_last_antenna_idx);
 184	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
 185
 186	if (band == NL80211_BAND_2GHZ && !no_cck)
 187		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
 188				   tx_ant);
 189	else
 190		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
 191}
 192
 193static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
 194					    struct ieee80211_vif *vif)
 195{
 196	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 197	int *global_cnt = data;
 198
 199	if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
 200	    mvmvif->phy_ctxt->id < MAX_PHYS)
 201		*global_cnt += 1;
 
 202}
 203
 204static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
 205{
 206	return IWL_MVM_TRAFFIC_LOW;
 207}
 208
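/* Pick the scan type: unassociated when no interface holds a PHY context,
 * fragmented under high traffic load or low latency (if the firmware supports
 * it and this is not a P2P device scan), otherwise mild or wild depending on
 * the traffic load.
 */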
 209static enum
 210iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
 211{
 212	int global_cnt = 0;
 213	enum iwl_mvm_traffic_load load;
 214	bool low_latency;
 215
 216	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
 217					    IEEE80211_IFACE_ITER_NORMAL,
 218					    iwl_mvm_scan_condition_iterator,
 219					    &global_cnt);
 220	if (!global_cnt)
 
 221		return IWL_SCAN_TYPE_UNASSOC;
 222
 223	load = iwl_mvm_get_traffic_load(mvm);
 224	low_latency = iwl_mvm_low_latency(mvm);
 
 
 
 225
 226	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
 227	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
 228		return IWL_SCAN_TYPE_FRAGMENTED;
 229
 230	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
 231		return IWL_SCAN_TYPE_MILD;
 232
 233	return IWL_SCAN_TYPE_WILD;
 234}
 235
 236static int
 237iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
 238			      struct cfg80211_scan_request *req,
 239			      struct iwl_mvm_scan_params *params)
 240{
 241	if (!req->duration)
 242		return 0;
 243
 244	if (req->duration_mandatory &&
 245	    req->duration > scan_timing[params->type].max_out_time) {
 246		IWL_DEBUG_SCAN(mvm,
 247			       "Measurement scan - too long dwell %hu (max out time %u)\n",
 248			       req->duration,
 249			       scan_timing[params->type].max_out_time);
 250		return -EOPNOTSUPP;
 251	}
 252
 253	return min_t(u32, (u32)req->duration,
 254		     scan_timing[params->type].max_out_time);
 255}
 256
 257static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
 258{
 259	/* require rrm scan whenever the fw supports it */
 260	return fw_has_capa(&mvm->fw->ucode_capa,
 261			   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
 262}
 263
 264static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
 265{
 266	int max_probe_len;
 267
 268	max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
 269
 270	/* we create the 802.11 header and SSID element */
 271	max_probe_len -= 24 + 2;
 272
 273	/* DS parameter set element is added on 2.4GHZ band if required */
 274	if (iwl_mvm_rrm_scan_needed(mvm))
 275		max_probe_len -= 3;
 276
 277	return max_probe_len;
 278}
 279
 280int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
 281{
 282	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
 283
 284	/* TODO: [BUG] This function should return the maximum allowed size of
 285	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
 286	 * in the same command. So the correct implementation of this function
 287	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
 288	 * command has only 512 bytes and it would leave us with about 240
 289	 * bytes for scan IEs, which is clearly not enough. So meanwhile
 290	 * we will report an incorrect value. This may result in a failure to
 291	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
 292	 * functions with -ENOBUFS, if a large enough probe is provided.
 293	 */
 294	return max_ie_len;
 295}
 296
 297static u8 *iwl_mvm_dump_channel_list(struct iwl_scan_results_notif *res,
 298				     int num_res, u8 *buf, size_t buf_size)
 299{
 300	int i;
 301	u8 *pos = buf, *end = buf + buf_size;
 302
 303	for (i = 0; pos < end && i < num_res; i++)
 304		pos += snprintf(pos, end - pos, " %u", res[i].channel);
 305
 306	/* terminate the string in case the buffer was too short */
 307	*(buf + buf_size - 1) = '\0';
 308
 309	return buf;
 310}
 311
 312void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
 313					      struct iwl_rx_cmd_buffer *rxb)
 314{
 315	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 316	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
 317	u8 buf[256];
 318
 319	IWL_DEBUG_SCAN(mvm,
 320		       "Scan offload iteration complete: status=0x%x scanned channels=%d channels list: %s\n",
 321		       notif->status, notif->scanned_channels,
 322		       iwl_mvm_dump_channel_list(notif->results,
 323						 notif->scanned_channels, buf,
 324						 sizeof(buf)));
 325
 326	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
 327		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
 328		ieee80211_sched_scan_results(mvm->hw);
 329		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
 330	}
 331}
 332
 333void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
 334				 struct iwl_rx_cmd_buffer *rxb)
 335{
 336	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
 337	ieee80211_sched_scan_results(mvm->hw);
 338}
 339
 340static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
 341{
 342	switch (status) {
 343	case IWL_SCAN_EBS_SUCCESS:
 344		return "successful";
 345	case IWL_SCAN_EBS_INACTIVE:
 346		return "inactive";
 347	case IWL_SCAN_EBS_FAILED:
 348	case IWL_SCAN_EBS_CHAN_NOT_FOUND:
 349	default:
 350		return "failed";
 351	}
 352}
 353
 354void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
 355					 struct iwl_rx_cmd_buffer *rxb)
 356{
 357	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 358	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
 359	bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
 360
 361	/* If this happens, the firmware has mistakenly sent an LMAC
 362	 * notification during UMAC scans -- warn and ignore it.
 363	 */
 364	if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
 365				     IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
 366		return;
 367
 368	/* scan status must be locked for proper checking */
 369	lockdep_assert_held(&mvm->mutex);
 370
 371	/* We first check if we were stopping a scan, in which case we
 372	 * just clear the stopping flag.  Then we check if it was a
 373	 * firmware initiated stop, in which case we need to inform
 374	 * mac80211.
 375	 * Note that we can have a stopping and a running scan
 376	 * simultaneously, but we can't have two different types of
 377	 * scans stopping or running at the same time (since LMAC
 378	 * doesn't support it).
 379	 */
 380
 381	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
 382		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
 383
 384		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 385			       aborted ? "aborted" : "completed",
 386			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 387		IWL_DEBUG_SCAN(mvm,
 388			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
 389			       scan_notif->last_schedule_line,
 390			       scan_notif->last_schedule_iteration,
 391			       __le32_to_cpu(scan_notif->time_after_last_iter));
 392
 393		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
 394	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
 395		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
 396			       aborted ? "aborted" : "completed",
 397			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 398
 399		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
 400	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
 401		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
 402
 403		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
 404			       aborted ? "aborted" : "completed",
 405			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 406		IWL_DEBUG_SCAN(mvm,
 407			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
 408			       scan_notif->last_schedule_line,
 409			       scan_notif->last_schedule_iteration,
 410			       __le32_to_cpu(scan_notif->time_after_last_iter));
 411
 412		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
 413		ieee80211_sched_scan_stopped(mvm->hw);
 414		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 415	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
 416		struct cfg80211_scan_info info = {
 417			.aborted = aborted,
 418		};
 419
 420		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
 421			       aborted ? "aborted" : "completed",
 422			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
 423
 424		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
 425		ieee80211_scan_completed(mvm->hw, &info);
 426		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 427		cancel_delayed_work(&mvm->scan_timeout_dwork);
 
 428	} else {
 429		IWL_ERR(mvm,
 430			"got scan complete notification but no scan is running\n");
 431	}
 432
 433	mvm->last_ebs_successful =
 434			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
 435			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
 436}
 437
 438static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
 439{
 440	int i;
 441
 442	for (i = 0; i < PROBE_OPTION_MAX; i++) {
 443		if (!ssid_list[i].len)
 444			break;
 445		if (ssid_list[i].len == ssid_len &&
 446		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
 447			return i;
 448	}
 449	return -1;
 450}
 451
 452/* We insert the SSIDs in an inverted order, because the FW will
 453 * invert the order back.
 454 */
 455static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
 456				 struct iwl_ssid_ie *ssids,
 457				 u32 *ssid_bitmap)
 458{
 459	int i, j;
 460	int index;
 
 461
 462	/*
 463	 * copy SSIDs from match list.
 464	 * iwl_mvm_config_sched_scan_profiles() uses the order of these SSIDs to
 465	 * configure the match list.
 466	 */
 467	for (i = 0, j = params->n_match_sets - 1;
 468	     j >= 0 && i < PROBE_OPTION_MAX;
 469	     i++, j--) {
 470		/* skip empty SSID matchsets */
 471		if (!params->match_sets[j].ssid.ssid_len)
 472			continue;
 473		ssids[i].id = WLAN_EID_SSID;
 474		ssids[i].len = params->match_sets[j].ssid.ssid_len;
 475		memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
 476		       ssids[i].len);
 477	}
 478
 479	/* add SSIDs from scan SSID list */
 480	*ssid_bitmap = 0;
 481	for (j = params->n_ssids - 1;
 482	     j >= 0 && i < PROBE_OPTION_MAX;
 483	     i++, j--) {
 484		index = iwl_ssid_exist(params->ssids[j].ssid,
 485				       params->ssids[j].ssid_len,
 486				       ssids);
 487		if (index < 0) {
 488			ssids[i].id = WLAN_EID_SSID;
 489			ssids[i].len = params->ssids[j].ssid_len;
 490			memcpy(ssids[i].ssid, params->ssids[j].ssid,
 491			       ssids[i].len);
 492			*ssid_bitmap |= BIT(i);
 493		} else {
 494			*ssid_bitmap |= BIT(index);
 495		}
 496	}
 
 
 497}
 498
 499static int
 500iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
 501				   struct cfg80211_sched_scan_request *req)
 502{
 503	struct iwl_scan_offload_profile *profile;
 504	struct iwl_scan_offload_profile_cfg *profile_cfg;
 505	struct iwl_scan_offload_blacklist *blacklist;
 
 
 
 
 506	struct iwl_host_cmd cmd = {
 507		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
 508		.len[1] = sizeof(*profile_cfg),
 509		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
 510		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
 511	};
 512	int blacklist_len;
 513	int i;
 514	int ret;
 515
 516	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
 517		return -EIO;
 518
 519	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
 520		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
 521	else
 522		blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
 523
 524	blacklist = kzalloc(sizeof(*blacklist) * blacklist_len, GFP_KERNEL);
 525	if (!blacklist)
 526		return -ENOMEM;
 527
 528	profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
 529	if (!profile_cfg) {
 530		ret = -ENOMEM;
 531		goto free_blacklist;
 532	}
 533
 534	cmd.data[0] = blacklist;
 535	cmd.len[0] = sizeof(*blacklist) * blacklist_len;
 536	cmd.data[1] = profile_cfg;
 537
 538	/* No blacklist configuration */
 539
 540	profile_cfg->num_profiles = req->n_match_sets;
 541	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
 542	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
 543	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
 544	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
 545		profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
 546
 547	for (i = 0; i < req->n_match_sets; i++) {
 548		profile = &profile_cfg->profiles[i];
 549		profile->ssid_index = i;
 550		/* Support any cipher and auth algorithm */
 551		profile->unicast_cipher = 0xff;
 552		profile->auth_alg = 0xff;
 
 
 553		profile->network_type = IWL_NETWORK_TYPE_ANY;
 554		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
 555		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
 556	}
 557
 558	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
 559
 560	ret = iwl_mvm_send_cmd(mvm, &cmd);
 561	kfree(profile_cfg);
 562free_blacklist:
 563	kfree(blacklist);
 564
 565	return ret;
 566}
 567
 568static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
 569				  struct cfg80211_sched_scan_request *req)
 570{
 571	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
 572		IWL_DEBUG_SCAN(mvm,
 573			       "Sending scheduled scan with filtering, n_match_sets %d\n",
 574			       req->n_match_sets);
 575		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
 576		return false;
 577	}
 578
 579	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
 580
 581	mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
 582	return true;
 583}
 584
 585static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
 586{
 587	int ret;
 588	struct iwl_host_cmd cmd = {
 589		.id = SCAN_OFFLOAD_ABORT_CMD,
 590	};
 591	u32 status;
 592
 593	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
 594	if (ret)
 595		return ret;
 596
 597	if (status != CAN_ABORT_STATUS) {
 598		/*
 599		 * The scan abort will return 1 for success or
 600		 * 2 for "failure".  A failure condition can be
 601		 * due to simply not being in an active scan which
 602		 * can occur if we send the scan abort before the
 603		 * microcode has notified us that a scan is completed.
 604		 */
 605		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
 606		ret = -ENOENT;
 607	}
 608
 609	return ret;
 610}
 611
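/* Fill the per-band TX commands used to send probe requests during the scan:
 * entry 0 covers 2.4 GHz, entry 1 covers 5 GHz, both sent from the aux station.
 */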
 612static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
 613				     struct iwl_scan_req_tx_cmd *tx_cmd,
 614				     bool no_cck)
 615{
 616	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
 617					 TX_CMD_FLG_BT_DIS);
 618	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
 619							   NL80211_BAND_2GHZ,
 620							   no_cck);
 621	tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
 622
 623	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
 624					 TX_CMD_FLG_BT_DIS);
 
 625	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
 626							   NL80211_BAND_5GHZ,
 627							   no_cck);
 628	tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
 629}
 630
 631static void
 632iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
 633			       struct ieee80211_channel **channels,
 634			       int n_channels, u32 ssid_bitmap,
 635			       struct iwl_scan_req_lmac *cmd)
 636{
 637	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
 638	int i;
 639
 640	for (i = 0; i < n_channels; i++) {
 641		channel_cfg[i].channel_num =
 642			cpu_to_le16(channels[i]->hw_value);
 643		channel_cfg[i].iter_count = cpu_to_le16(1);
 644		channel_cfg[i].iter_interval = 0;
 645		channel_cfg[i].flags =
 646			cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
 647				    ssid_bitmap);
 648	}
 649}
 650
 651static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
 652					   size_t len, u8 *const pos)
 653{
 654	static const u8 before_ds_params[] = {
 655			WLAN_EID_SSID,
 656			WLAN_EID_SUPP_RATES,
 657			WLAN_EID_REQUEST,
 658			WLAN_EID_EXT_SUPP_RATES,
 659	};
 660	size_t offs;
 661	u8 *newpos = pos;
 662
 663	if (!iwl_mvm_rrm_scan_needed(mvm)) {
 664		memcpy(newpos, ies, len);
 665		return newpos + len;
 666	}
 667
 668	offs = ieee80211_ie_split(ies, len,
 669				  before_ds_params,
 670				  ARRAY_SIZE(before_ds_params),
 671				  0);
 672
 673	memcpy(newpos, ies, offs);
 674	newpos += offs;
 675
 676	/* Add a placeholder for DS Parameter Set element */
 677	*newpos++ = WLAN_EID_DS_PARAMS;
 678	*newpos++ = 1;
 679	*newpos++ = 0;
 680
 681	memcpy(newpos, ies + offs, len - offs);
 682	newpos += len - offs;
 683
 684	return newpos;
 685}
 686
 687static void
 688iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 689			 struct ieee80211_scan_ies *ies,
 690			 struct iwl_mvm_scan_params *params)
 691{
 692	struct ieee80211_mgmt *frame = (void *)params->preq.buf;
 693	u8 *pos, *newpos;
 694	const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
 695		params->mac_addr : NULL;
 696
 697	/*
 698	 * Unfortunately, right now the offload scan doesn't support randomising
 699	 * within the firmware, so until the firmware API is ready we implement
 700	 * it in the driver. This means that the random address won't change between
 701	 * scan iterations, only when the scan is restarted, but at least that helps a bit.
 702	 */
 703	if (mac_addr)
 704		get_random_mask_addr(frame->sa, mac_addr,
 705				     params->mac_addr_mask);
 706	else
 707		memcpy(frame->sa, vif->addr, ETH_ALEN);
 708
 709	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
 710	eth_broadcast_addr(frame->da);
 711	eth_broadcast_addr(frame->bssid);
 712	frame->seq_ctrl = 0;
 713
 714	pos = frame->u.probe_req.variable;
 715	*pos++ = WLAN_EID_SSID;
 716	*pos++ = 0;
 717
 718	params->preq.mac_header.offset = 0;
 719	params->preq.mac_header.len = cpu_to_le16(24 + 2);
 720
 721	/* Insert ds parameter set element on 2.4 GHz band */
 722	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
 723						 ies->ies[NL80211_BAND_2GHZ],
 724						 ies->len[NL80211_BAND_2GHZ],
 725						 pos);
 726	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
 727	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
 728	pos = newpos;
 729
 730	memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
 731	       ies->len[NL80211_BAND_5GHZ]);
 732	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
 733	params->preq.band_data[1].len =
 734		cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
 735	pos += ies->len[NL80211_BAND_5GHZ];
 736
 737	memcpy(pos, ies->common_ies, ies->common_ie_len);
 738	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
 739	params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
 740}
 741
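/* Program the LMAC dwell and out-of-channel timings from the scan_timing
 * entry that matches the selected scan type.
 */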
 742static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
 743				    struct iwl_scan_req_lmac *cmd,
 744				    struct iwl_mvm_scan_params *params)
 745{
 746	cmd->active_dwell = scan_timing[params->type].dwell_active;
 747	cmd->passive_dwell = scan_timing[params->type].dwell_passive;
 748	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
 749	cmd->extended_dwell = scan_timing[params->type].dwell_extended;
 750	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
 751	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
 752	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
 753}
 754
 755static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
 756				     struct ieee80211_scan_ies *ies,
 757				     int n_channels)
 758{
 759	return ((n_ssids <= PROBE_OPTION_MAX) &&
 760		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
 761		(ies->common_ie_len +
 762		 ies->len[NL80211_BAND_2GHZ] +
 763		 ies->len[NL80211_BAND_5GHZ] <=
 764		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
 765}
 766
 767static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
 768					struct ieee80211_vif *vif)
 769{
 770	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
 771
 772	/* We can only use EBS if:
 773	 *	1. the feature is supported;
 774	 *	2. the last EBS was successful;
 775	 *	3. if only single scan, the single scan EBS API is supported;
 776	 *	4. it's not a p2p find operation.
 
 
 777	 */
 778	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
 779		mvm->last_ebs_successful &&
 780		vif->type != NL80211_IFTYPE_P2P_DEVICE);
 
 781}
 782
 783static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
 784{
 785	return params->n_scan_plans == 1 &&
 786		params->scan_plans[0].iterations == 1;
 787}
 788
 789static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
 790				   struct iwl_mvm_scan_params *params,
 791				   struct ieee80211_vif *vif)
 792{
 793	int flags = 0;
 794
 795	if (params->n_ssids == 0)
 796		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
 797
 798	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
 799		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
 800
 801	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
 802		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
 803
 804	if (iwl_mvm_rrm_scan_needed(mvm))
 
 
 805		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
 806
 807	if (params->pass_all)
 808		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
 809	else
 810		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
 811
 812#ifdef CONFIG_IWLWIFI_DEBUGFS
 813	if (mvm->scan_iter_notif_enabled)
 814		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
 815#endif
 816
 817	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
 818		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
 819
 820	if (iwl_mvm_is_regular_scan(params) &&
 821	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
 822	    params->type != IWL_SCAN_TYPE_FRAGMENTED)
 823		flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
 824
 825	return flags;
 826}
 827
 828static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 829			     struct iwl_mvm_scan_params *params)
 830{
 831	struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
 832	struct iwl_scan_probe_req *preq =
 833		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
 834			 mvm->fw->ucode_capa.n_scan_channels);
 835	u32 ssid_bitmap = 0;
 836	int i;
 837
 838	lockdep_assert_held(&mvm->mutex);
 839
 840	memset(cmd, 0, ksize(cmd));
 841
 842	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
 843		return -EINVAL;
 844
 845	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
 846
 847	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
 848	cmd->iter_num = cpu_to_le32(1);
 849	cmd->n_channels = (u8)params->n_channels;
 850
 851	cmd->delay = cpu_to_le32(params->delay);
 852
 853	cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
 854							      vif));
 855
 856	cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
 
 857	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
 858					MAC_FILTER_IN_BEACON);
 859	iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
 860	iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
 861
 862	/* this API uses bits 1-20 instead of 0-19 */
 863	ssid_bitmap <<= 1;
 864
 865	for (i = 0; i < params->n_scan_plans; i++) {
 866		struct cfg80211_sched_scan_plan *scan_plan =
 867			&params->scan_plans[i];
 868
 869		cmd->schedule[i].delay =
 870			cpu_to_le16(scan_plan->interval);
 871		cmd->schedule[i].iterations = scan_plan->iterations;
 872		cmd->schedule[i].full_scan_mul = 1;
 873	}
 874
 875	/*
 876	 * If the number of iterations of the last scan plan is set to
 877	 * zero, it should run infinitely. However, this is not always the case.
 878	 * For example, when a regular scan is requested the driver sets one scan
 879	 * plan with one iteration.
 880	 */
 881	if (!cmd->schedule[i - 1].iterations)
 882		cmd->schedule[i - 1].iterations = 0xff;
 883
 884	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
 885		cmd->channel_opt[0].flags =
 886			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
 887				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
 888				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
 889		cmd->channel_opt[0].non_ebs_ratio =
 890			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
 891		cmd->channel_opt[1].flags =
 892			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
 893				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
 894				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
 895		cmd->channel_opt[1].non_ebs_ratio =
 896			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
 897	}
 898
 899	iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
 900				       params->n_channels, ssid_bitmap, cmd);
 901
 902	*preq = params->preq;
 903
 904	return 0;
 905}
 906
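/* Map a legacy rate index to the corresponding SCAN_CONFIG_RATE_* flag. */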
 907static int rate_to_scan_rate_flag(unsigned int rate)
 908{
 909	static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
 910		[IWL_RATE_1M_INDEX]	= SCAN_CONFIG_RATE_1M,
 911		[IWL_RATE_2M_INDEX]	= SCAN_CONFIG_RATE_2M,
 912		[IWL_RATE_5M_INDEX]	= SCAN_CONFIG_RATE_5M,
 913		[IWL_RATE_11M_INDEX]	= SCAN_CONFIG_RATE_11M,
 914		[IWL_RATE_6M_INDEX]	= SCAN_CONFIG_RATE_6M,
 915		[IWL_RATE_9M_INDEX]	= SCAN_CONFIG_RATE_9M,
 916		[IWL_RATE_12M_INDEX]	= SCAN_CONFIG_RATE_12M,
 917		[IWL_RATE_18M_INDEX]	= SCAN_CONFIG_RATE_18M,
 918		[IWL_RATE_24M_INDEX]	= SCAN_CONFIG_RATE_24M,
 919		[IWL_RATE_36M_INDEX]	= SCAN_CONFIG_RATE_36M,
 920		[IWL_RATE_48M_INDEX]	= SCAN_CONFIG_RATE_48M,
 921		[IWL_RATE_54M_INDEX]	= SCAN_CONFIG_RATE_54M,
 922	};
 923
 924	return rate_to_scan_rate[rate];
 925}
 926
 927static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
 928{
 929	struct ieee80211_supported_band *band;
 930	unsigned int rates = 0;
 931	int i;
 932
 933	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
 934	for (i = 0; i < band->n_bitrates; i++)
 935		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
 936	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
 937	for (i = 0; i < band->n_bitrates; i++)
 938		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
 939
 940	/* Set both basic rates and supported rates */
 941	rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
 942
 943	return cpu_to_le32(rates);
 944}
 945
 946int iwl_mvm_config_scan(struct iwl_mvm *mvm)
 947{
 948	struct iwl_scan_config *scan_config;
 949	struct ieee80211_supported_band *band;
 950	int num_channels =
 951		mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
 952		mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
 953	int ret, i, j = 0, cmd_size;
 954	struct iwl_host_cmd cmd = {
 955		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 956	};
 957	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
 958
 959	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
 960		return -ENOBUFS;
 961
 962	if (type == mvm->scan_type) {
 963		IWL_DEBUG_SCAN(mvm,
 964			       "Ignoring UMAC scan config of the same type\n");
 965		return 0;
 966	}
 967
 968	cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
 
 
 
 
 969
 970	scan_config = kzalloc(cmd_size, GFP_KERNEL);
 971	if (!scan_config)
 972		return -ENOMEM;
 973
 974	scan_config->flags = cpu_to_le32(SCAN_CONFIG_FLAG_ACTIVATE |
 975					 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
 976					 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
 977					 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
 978					 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
 979					 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
 980					 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
 981					 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
 982					 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
 983					 SCAN_CONFIG_N_CHANNELS(num_channels) |
 984					 (type == IWL_SCAN_TYPE_FRAGMENTED ?
 985					  SCAN_CONFIG_FLAG_SET_FRAGMENTED :
 986					  SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED));
 987	scan_config->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
 988	scan_config->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
 989	scan_config->legacy_rates = iwl_mvm_scan_config_rates(mvm);
 990	scan_config->out_of_channel_time =
 991		cpu_to_le32(scan_timing[type].max_out_time);
 992	scan_config->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
 993	scan_config->dwell_active = scan_timing[type].dwell_active;
 994	scan_config->dwell_passive = scan_timing[type].dwell_passive;
 995	scan_config->dwell_fragmented = scan_timing[type].dwell_fragmented;
 996	scan_config->dwell_extended = scan_timing[type].dwell_extended;
 997
 998	memcpy(&scan_config->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
 999
1000	scan_config->bcast_sta_id = mvm->aux_sta.sta_id;
1001	scan_config->channel_flags = IWL_CHANNEL_FLAG_EBS |
1002				     IWL_CHANNEL_FLAG_ACCURATE_EBS |
1003				     IWL_CHANNEL_FLAG_EBS_ADD |
1004				     IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
1005
1006	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
1007	for (i = 0; i < band->n_channels; i++, j++)
1008		scan_config->channel_array[j] = band->channels[i].hw_value;
1009	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
1010	for (i = 0; i < band->n_channels; i++, j++)
1011		scan_config->channel_array[j] = band->channels[i].hw_value;
1012
1013	cmd.data[0] = scan_config;
1014	cmd.len[0] = cmd_size;
1015	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
1016
1017	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
1018
1019	ret = iwl_mvm_send_cmd(mvm, &cmd);
1020	if (!ret)
1021		mvm->scan_type = type;
 
 
1022
1023	kfree(scan_config);
1024	return ret;
1025}
1026
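/* Find the UMAC scan UID whose status matches the given value, or -ENOENT. */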
1027static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
1028{
1029	int i;
1030
1031	for (i = 0; i < mvm->max_scans; i++)
1032		if (mvm->scan_uid_status[i] == status)
1033			return i;
1034
1035	return -ENOENT;
1036}
1037
1038static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1039				    struct iwl_scan_req_umac *cmd,
1040				    struct iwl_mvm_scan_params *params)
1041{
1042	if (params->measurement_dwell) {
1043		cmd->active_dwell = params->measurement_dwell;
1044		cmd->passive_dwell = params->measurement_dwell;
1045		cmd->extended_dwell = params->measurement_dwell;
1046	} else {
1047		cmd->active_dwell = scan_timing[params->type].dwell_active;
1048		cmd->passive_dwell = scan_timing[params->type].dwell_passive;
1049		cmd->extended_dwell = scan_timing[params->type].dwell_extended;
1050	}
1051	cmd->fragmented_dwell = scan_timing[params->type].dwell_fragmented;
1052	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
1053	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
1054	cmd->scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1055
1056	if (iwl_mvm_is_regular_scan(params))
1057		cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
1058	else
1059		cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
1060}
1061
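/* Fill the UMAC channel configuration: one entry per channel, each with a
 * single iteration and the bitmap of direct SSIDs to probe on it.
 */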
1062static void
1063iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
1064			       struct ieee80211_channel **channels,
1065			       int n_channels, u32 ssid_bitmap,
1066			       struct iwl_scan_req_umac *cmd)
1067{
1068	struct iwl_scan_channel_cfg_umac *channel_cfg = (void *)&cmd->data;
1069	int i;
1070
1071	for (i = 0; i < n_channels; i++) {
1072		channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
1073		channel_cfg[i].channel_num = channels[i]->hw_value;
1074		channel_cfg[i].iter_count = 1;
1075		channel_cfg[i].iter_interval = 0;
1076	}
1077}
1078
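/* Translate the scan parameters into the UMAC general flags bitmask
 * (passive, fragmented, periodic, pass-all vs. match, RRM, extended dwell).
 */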
1079static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
1080				   struct iwl_mvm_scan_params *params,
1081				   struct ieee80211_vif *vif)
1082{
1083	u16 flags = 0;
1084
1085	if (params->n_ssids == 0)
1086		flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
1087
1088	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
1089		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
1090
1091	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
1092		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
1093
1094	if (iwl_mvm_rrm_scan_needed(mvm))
1095		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
1096
1097	if (params->pass_all)
1098		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
1099	else
1100		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
1101
1102	if (!iwl_mvm_is_regular_scan(params))
1103		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
1104
1105	if (params->measurement_dwell)
1106		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1107
1108#ifdef CONFIG_IWLWIFI_DEBUGFS
1109	if (mvm->scan_iter_notif_enabled)
1110		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1111#endif
1112
1113	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
1114		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
1115
1116	if (iwl_mvm_is_regular_scan(params) &&
1117	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
1118	    params->type != IWL_SCAN_TYPE_FRAGMENTED)
 
 
1119		flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
1120
1121	return flags;
1122}
1123
1124static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1125			     struct iwl_mvm_scan_params *params,
1126			     int type)
1127{
1128	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
1129	struct iwl_scan_req_umac_tail *sec_part = (void *)&cmd->data +
1130		sizeof(struct iwl_scan_channel_cfg_umac) *
1131			mvm->fw->ucode_capa.n_scan_channels;
1132	int uid, i;
1133	u32 ssid_bitmap = 0;
 
 
1134	struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
1135
1136	lockdep_assert_held(&mvm->mutex);
1137
1138	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
1139		return -EINVAL;
1140
1141	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
1142	if (uid < 0)
1143		return uid;
1144
1145	memset(cmd, 0, ksize(cmd));
1146
1147	iwl_mvm_scan_umac_dwell(mvm, cmd, params);
1148
1149	mvm->scan_uid_status[uid] = type;
1150
1151	cmd->uid = cpu_to_le32(uid);
1152	cmd->general_flags = cpu_to_le16(iwl_mvm_scan_umac_flags(mvm, params,
1153								 vif));
1154	cmd->scan_start_mac_id = scan_vif->id;
1155
1156	if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
1157		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1158
1159	if (iwl_mvm_scan_use_ebs(mvm, vif))
1160		cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1161				     IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
1162				     IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
1163
1164	cmd->n_channels = params->n_channels;
 
1165
1166	iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
1167
1168	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
1169				       params->n_channels, ssid_bitmap, cmd);
1170
1171	for (i = 0; i < params->n_scan_plans; i++) {
1172		struct cfg80211_sched_scan_plan *scan_plan =
1173			&params->scan_plans[i];
1174
1175		sec_part->schedule[i].iter_count = scan_plan->iterations;
1176		sec_part->schedule[i].interval =
1177			cpu_to_le16(scan_plan->interval);
1178	}
1179
1180	/*
1181	 * If the number of iterations of the last scan plan is set to
1182	 * zero, it should run infinitely. However, this is not always the case.
1183	 * For example, when a regular scan is requested the driver sets one scan
1184	 * plan with one iteration.
1185	 */
1186	if (!sec_part->schedule[i - 1].iter_count)
1187		sec_part->schedule[i - 1].iter_count = 0xff;
1188
1189	sec_part->delay = cpu_to_le16(params->delay);
1190	sec_part->preq = params->preq;
 
1191
1192	return 0;
1193}
1194
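/* Count how many scan types are currently marked as running in scan_status. */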
1195static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
1196{
1197	return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
1198}
1199
1200static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
1201{
1202	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
1203					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
1204
1205	/* This looks a bit arbitrary, but the idea is that if we run
1206	 * out of possible simultaneous scans and the userspace is
1207	 * trying to run a scan type that is already running, we
1208	 * return -EBUSY.  But if the userspace wants to start a
1209	 * different type of scan, we stop the opposite type to make
1210	 * space for the new request.  The reason is backwards
1211	 * compatibility with old wpa_supplicant that wouldn't stop a
1212	 * scheduled scan before starting a normal scan.
1213	 */
1214
1215	if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
1216		return 0;
1217
1218	/* Use a switch, even though this is a bitmask, so that more
1219	 * than one bit set will fall into the default case and we will warn.
1220	 */
1221	switch (type) {
1222	case IWL_MVM_SCAN_REGULAR:
1223		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1224			return -EBUSY;
1225		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
1226	case IWL_MVM_SCAN_SCHED:
1227		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1228			return -EBUSY;
1229		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
1230	case IWL_MVM_SCAN_NETDETECT:
1231		/* For non-unified images, there's no need to stop
1232		 * anything for net-detect since the firmware is
1233		 * restarted anyway.  This way, any sched scans that
1234		 * were running will be restarted when we resume.
1235		 */
1236		if (!unified_image)
1237			return 0;
1238
1239		/* If this is a unified image and we ran out of scans,
1240		 * we need to stop something.  Prefer stopping regular
1241		 * scans, because the results are useless at this
1242		 * point, and we should be able to keep running
1243		 * another scheduled scan while suspended.
1244		 */
1245		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
1246			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
1247						 true);
1248		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
1249			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
1250						 true);
1251
1252		/* fall through, something is wrong if no scan was
1253		 * running but we ran out of scans.
1254		 */
 
1255	default:
1256		WARN_ON(1);
1257		break;
1258	}
1259
1260	return -EIO;
1261}
1262
1263#define SCAN_TIMEOUT 20000
1264
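/* Watchdog for regular scans: if the firmware never reports completion within
 * SCAN_TIMEOUT, trigger an NMI so the usual firmware error recovery kicks in.
 */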
1265void iwl_mvm_scan_timeout_wk(struct work_struct *work)
1266{
1267	struct delayed_work *delayed_work = to_delayed_work(work);
1268	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
1269					   scan_timeout_dwork);
1270
1271	IWL_ERR(mvm, "regular scan timed out\n");
1272
1273	iwl_force_nmi(mvm->trans);
1274}
1275
1276int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1277			   struct cfg80211_scan_request *req,
1278			   struct ieee80211_scan_ies *ies)
1279{
1280	struct iwl_host_cmd hcmd = {
1281		.len = { iwl_mvm_scan_size(mvm), },
1282		.data = { mvm->scan_cmd, },
1283		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
1284	};
1285	struct iwl_mvm_scan_params params = {};
1286	int ret;
1287	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
1288
1289	lockdep_assert_held(&mvm->mutex);
1290
1291	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1292		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
1293		return -EBUSY;
1294	}
1295
1296	ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
1297	if (ret)
1298		return ret;
1299
1300	/* we should have failed registration if scan_cmd was NULL */
1301	if (WARN_ON(!mvm->scan_cmd))
1302		return -ENOMEM;
1303
1304	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1305		return -ENOBUFS;
1306
1307	params.n_ssids = req->n_ssids;
1308	params.flags = req->flags;
1309	params.n_channels = req->n_channels;
1310	params.delay = 0;
1311	params.ssids = req->ssids;
1312	params.channels = req->channels;
1313	params.mac_addr = req->mac_addr;
1314	params.mac_addr_mask = req->mac_addr_mask;
1315	params.no_cck = req->no_cck;
1316	params.pass_all = true;
1317	params.n_match_sets = 0;
1318	params.match_sets = NULL;
 
1319
1320	params.scan_plans = &scan_plan;
1321	params.n_scan_plans = 1;
1322
1323	params.type =
1324		iwl_mvm_get_scan_type(mvm,
1325				      vif->type == NL80211_IFTYPE_P2P_DEVICE);
1326
1327	ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
1328	if (ret < 0)
1329		return ret;
1330
1331	params.measurement_dwell = ret;
1332
1333	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
 
1334
1335	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1336		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
1337		ret = iwl_mvm_scan_umac(mvm, vif, &params,
1338					IWL_MVM_SCAN_REGULAR);
1339	} else {
1340		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1341		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1342	}
1343
1344	if (ret)
1345		return ret;
1346
1347	ret = iwl_mvm_send_cmd(mvm, &hcmd);
1348	if (ret) {
1349		/* If the scan failed, it usually means that the FW was unable
1350		 * to allocate the time events. Warn on it, but maybe we
1351		 * should try to send the command again with different params.
1352		 */
1353		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
 
 
1354		return ret;
1355	}
1356
1357	IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
1358	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
1359	mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
1360	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
1361
1362	queue_delayed_work(system_wq, &mvm->scan_timeout_dwork,
1363			   msecs_to_jiffies(SCAN_TIMEOUT));
 
 
 
1364
1365	return 0;
1366}
1367
1368int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1369			     struct ieee80211_vif *vif,
1370			     struct cfg80211_sched_scan_request *req,
1371			     struct ieee80211_scan_ies *ies,
1372			     int type)
1373{
1374	struct iwl_host_cmd hcmd = {
1375		.len = { iwl_mvm_scan_size(mvm), },
1376		.data = { mvm->scan_cmd, },
1377		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
1378	};
1379	struct iwl_mvm_scan_params params = {};
1380	int ret;
 
 
1381
1382	lockdep_assert_held(&mvm->mutex);
1383
1384	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
1385		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
1386		return -EBUSY;
1387	}
1388
1389	ret = iwl_mvm_check_running_scans(mvm, type);
1390	if (ret)
1391		return ret;
1392
1393	/* we should have failed registration if scan_cmd was NULL */
1394	if (WARN_ON(!mvm->scan_cmd))
1395		return -ENOMEM;
1396
1397	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
1398		return -ENOBUFS;
1399
1400	params.n_ssids = req->n_ssids;
1401	params.flags = req->flags;
1402	params.n_channels = req->n_channels;
1403	params.ssids = req->ssids;
1404	params.channels = req->channels;
1405	params.mac_addr = req->mac_addr;
1406	params.mac_addr_mask = req->mac_addr_mask;
1407	params.no_cck = false;
1408	params.pass_all =  iwl_mvm_scan_pass_all(mvm, req);
1409	params.n_match_sets = req->n_match_sets;
1410	params.match_sets = req->match_sets;
 
1411	if (!req->n_scan_plans)
1412		return -EINVAL;
1413
1414	params.n_scan_plans = req->n_scan_plans;
1415	params.scan_plans = req->scan_plans;
1416
1417	params.type =
1418		iwl_mvm_get_scan_type(mvm,
1419				      vif->type == NL80211_IFTYPE_P2P_DEVICE);
1420
1421	/* In theory, LMAC scans can handle a 32-bit delay, but since
1422	 * waiting for over 18 hours to start the scan is a bit silly
1423	 * and to keep it aligned with UMAC scans (which only support
1424	 * 16-bit delays), trim it down to 16-bits.
1425	 */
1426	if (req->delay > U16_MAX) {
1427		IWL_DEBUG_SCAN(mvm,
1428			       "delay value is > 16-bits, set to max possible\n");
1429		params.delay = U16_MAX;
1430	} else {
1431		params.delay = req->delay;
1432	}
1433
1434	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
1435	if (ret)
1436		return ret;
1437
1438	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
1439
1440	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1441		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
1442		ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
1443	} else {
1444		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
1445		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
1446	}
1447
1448	if (ret)
1449		return ret;
 
 
 
 
1450
1451	ret = iwl_mvm_send_cmd(mvm, &hcmd);
1452	if (!ret) {
1453		IWL_DEBUG_SCAN(mvm,
1454			       "Sched scan request was sent successfully\n");
1455		mvm->scan_status |= type;
1456	} else {
1457		/* If the scan failed, it usually means that the FW was unable
1458		 * to allocate the time events. Warn on it, but maybe we
1459		 * should try to send the command again with different params.
1460		 */
1461		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
 
 
1462	}
1463
1464	return ret;
1465}
1466
1467void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1468					 struct iwl_rx_cmd_buffer *rxb)
1469{
1470	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1471	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
1472	u32 uid = __le32_to_cpu(notif->uid);
1473	bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
1474
 
 
1475	if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
1476		return;
1477
1478	/* if the scan is already stopping, we don't need to notify mac80211 */
1479	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
1480		struct cfg80211_scan_info info = {
1481			.aborted = aborted,
1482			.scan_start_tsf = mvm->scan_start,
1483		};
1484
1485		memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN);
1486		ieee80211_scan_completed(mvm->hw, &info);
1487		mvm->scan_vif = NULL;
1488		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1489		cancel_delayed_work(&mvm->scan_timeout_dwork);
 
1490	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
1491		ieee80211_sched_scan_stopped(mvm->hw);
1492		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
1493	}
1494
1495	mvm->scan_status &= ~mvm->scan_uid_status[uid];
1496	IWL_DEBUG_SCAN(mvm,
1497		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
1498		       uid, mvm->scan_uid_status[uid],
1499		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
1500				"completed" : "aborted",
1501		       iwl_mvm_ebs_status_str(notif->ebs_status));
1502	IWL_DEBUG_SCAN(mvm,
1503		       "Last line %d, Last iteration %d, Time from last iteration %d\n",
1504		       notif->last_schedule, notif->last_iter,
1505		       __le32_to_cpu(notif->time_from_last_iter));
1506
1507	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
1508	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
1509		mvm->last_ebs_successful = false;
1510
1511	mvm->scan_uid_status[uid] = 0;
1512}
1513
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
					      struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
	u8 buf[256];

	mvm->scan_start = le64_to_cpu(notif->start_tsf);

	IWL_DEBUG_SCAN(mvm,
		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d channels list: %s\n",
		       notif->status, notif->scanned_channels,
		       iwl_mvm_dump_channel_list(notif->results,
						 notif->scanned_channels, buf,
						 sizeof(buf)));

	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
		ieee80211_sched_scan_results(mvm->hw);
		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
	}

	IWL_DEBUG_SCAN(mvm,
		       "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
		       mvm->scan_start);
}

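/*
 * Send SCAN_ABORT_UMAC for the scan whose status matches @type and, on
 * success, flip the UID bookkeeping to the corresponding "stopping" state.
 */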
static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
{
	struct iwl_umac_scan_abort cmd = {};
	int uid, ret;

	lockdep_assert_held(&mvm->mutex);

	/* We should always get a valid index here, because we already
	 * checked that this type of scan was running in the generic
	 * code.
	 */
	uid = iwl_mvm_scan_uid_by_status(mvm, type);
	if (WARN_ON_ONCE(uid < 0))
		return uid;

	cmd.uid = cpu_to_le32(uid);

	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   iwl_cmd_id(SCAN_ABORT_UMAC,
					      IWL_ALWAYS_LONG_GROUP, 0),
				   0, sizeof(cmd), &cmd);
	if (!ret)
		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;

	return ret;
}

static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
{
	struct iwl_notification_wait wait_scan_done;
	static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
					      SCAN_OFFLOAD_COMPLETE, };
	int ret;

	lockdep_assert_held(&mvm->mutex);

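	/* Register for the completion notifications before sending the abort
	 * so the "scan done" event cannot slip by between the abort command
	 * and the wait below.
	 */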
	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
				   scan_done_notif,
				   ARRAY_SIZE(scan_done_notif),
				   NULL, NULL);

	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		ret = iwl_mvm_umac_scan_abort(mvm, type);
	else
		ret = iwl_mvm_lmac_scan_abort(mvm);

	if (ret) {
		IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
		return ret;
	}

	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);

	return ret;
}

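/*
 * Worst-case size of a scan request command for the current firmware; the
 * opmode uses this to size the scan command buffer it allocates up front.
 */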
int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		return sizeof(struct iwl_scan_req_umac) +
			sizeof(struct iwl_scan_channel_cfg_umac) *
				mvm->fw->ucode_capa.n_scan_channels +
			sizeof(struct iwl_scan_req_umac_tail);

	return sizeof(struct iwl_scan_req_lmac) +
		sizeof(struct iwl_scan_channel_cfg_lmac) *
		mvm->fw->ucode_capa.n_scan_channels +
		sizeof(struct iwl_scan_probe_req);
}
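/*
 * A minimal sketch of the intended usage, assuming the opmode keeps the
 * pre-allocated request buffer in mvm->scan_cmd (the real allocation lives
 * in the opmode start path, not here):
 *
 *	mvm->scan_cmd = kmalloc(iwl_mvm_scan_size(mvm), GFP_KERNEL);
 *	if (!mvm->scan_cmd)
 *		return -ENOMEM;
 */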

/*
 * This function is used in the nic restart flow, to inform mac80211 about
 * scans that were aborted by the restart flow or by an assert.
 */
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
{
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		int uid, i;

		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
		if (uid >= 0) {
			struct cfg80211_scan_info info = {
				.aborted = true,
			};

			ieee80211_scan_completed(mvm->hw, &info);
			mvm->scan_uid_status[uid] = 0;
		}
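		/* A scheduled scan is restarted by mac80211 in restart_hw, so
		 * only report it as stopped when the FW is not about to be
		 * restarted.
		 */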
		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
		if (uid >= 0 && !mvm->restart_fw) {
			ieee80211_sched_scan_stopped(mvm->hw);
			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
			mvm->scan_uid_status[uid] = 0;
		}

		/* We shouldn't have any UIDs still set.  Loop over all the
		 * UIDs to make sure there's nothing left there and warn if
		 * any is found.
		 */
		for (i = 0; i < mvm->max_scans; i++) {
			if (WARN_ONCE(mvm->scan_uid_status[i],
				      "UMAC scan UID %d status was not cleaned\n",
				      i))
				mvm->scan_uid_status[i] = 0;
		}
	} else {
		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
			struct cfg80211_scan_info info = {
				.aborted = true,
			};

			ieee80211_scan_completed(mvm->hw, &info);
		}

		/* Sched scan will be restarted by mac80211 in
		 * restart_hw, so do not report if FW is about to be
		 * restarted.
		 */
		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
		    !mvm->restart_fw) {
			ieee80211_sched_scan_stopped(mvm->hw);
			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
		}
	}
}

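/*
 * Stop an ongoing scan of the given type.  If @notify is set, also tell
 * mac80211 that the scan (or scheduled scan) has ended.
 */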
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
{
	int ret;

	if (!(mvm->scan_status & type))
		return 0;

	if (iwl_mvm_is_radio_killed(mvm)) {
		ret = 0;
		goto out;
	}

	ret = iwl_mvm_scan_stop_wait(mvm, type);
	if (!ret)
		mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
out:
	/* Clear the scan status so the next scan requests will succeed and
	 * mark the scan as stopping, so that the Rx handler doesn't do
	 * anything with the completion notification, as the scan was already
	 * stopped above.
	 */
	mvm->scan_status &= ~type;

	if (type == IWL_MVM_SCAN_REGULAR) {
		/* Since the rx handler won't do anything now, we have
		 * to release the scan reference here.
		 */
		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
		cancel_delayed_work(&mvm->scan_timeout_dwork);
		if (notify) {
			struct cfg80211_scan_info info = {
				.aborted = true,
			};

			ieee80211_scan_completed(mvm->hw, &info);
		}
	} else if (notify) {
		ieee80211_sched_scan_stopped(mvm->hw);
		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
	}

	return ret;
}