v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/*
   3 * Copyright (C) 2015-2017 Intel Deutschland GmbH
   4 * Copyright (C) 2018-2022 Intel Corporation
   5 */
   6#include <linux/etherdevice.h>
   7#include <linux/math64.h>
   8#include <net/cfg80211.h>
   9#include "mvm.h"
  10#include "iwl-io.h"
  11#include "iwl-prph.h"
  12#include "constants.h"
  13
  14struct iwl_mvm_loc_entry {
  15	struct list_head list;
  16	u8 addr[ETH_ALEN];
  17	u8 lci_len, civic_len;
  18	u8 buf[];
  19};
  20
  21struct iwl_mvm_smooth_entry {
  22	struct list_head list;
  23	u8 addr[ETH_ALEN];
  24	s64 rtt_avg;
  25	u64 host_time;
  26};
  27
  28enum iwl_mvm_pasn_flags {
  29	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
  30};
  31
  32struct iwl_mvm_ftm_pasn_entry {
  33	struct list_head list;
  34	u8 addr[ETH_ALEN];
  35	u8 hltk[HLTK_11AZ_LEN];
  36	u8 tk[TK_11AZ_LEN];
  37	u8 cipher;
  38	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
  39	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
  40	u32 flags;
  41};
  42
  43int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
  44			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
  45			     u8 *hltk, u32 hltk_len)
  46{
  47	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
  48						      GFP_KERNEL);
  49	u32 expected_tk_len;
  50
  51	lockdep_assert_held(&mvm->mutex);
  52
  53	if (!pasn)
  54		return -ENOBUFS;
  55
  56	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
  57
  58	switch (pasn->cipher) {
  59	case IWL_LOCATION_CIPHER_CCMP_128:
  60	case IWL_LOCATION_CIPHER_GCMP_128:
  61		expected_tk_len = WLAN_KEY_LEN_CCMP;
  62		break;
  63	case IWL_LOCATION_CIPHER_GCMP_256:
  64		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
  65		break;
  66	default:
  67		goto out;
  68	}
  69
   70	/*
   71	 * If we are associated to this AP and already have a security
   72	 * context, the TK is already configured for this station, so it
   73	 * shouldn't be set again here.
   74	 */
  75	if (vif->cfg.assoc) {
  76		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
  77		struct ieee80211_bss_conf *link_conf;
  78		unsigned int link_id;
  79		struct ieee80211_sta *sta;
  80		u8 sta_id;
  81
  82		rcu_read_lock();
  83		for_each_vif_active_link(vif, link_conf, link_id) {
  84			if (memcmp(addr, link_conf->bssid, ETH_ALEN))
  85				continue;
  86
  87			sta_id = mvmvif->link[link_id]->ap_sta_id;
  88			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
  89			if (!IS_ERR_OR_NULL(sta) && sta->mfp)
  90				expected_tk_len = 0;
  91			break;
  92		}
  93		rcu_read_unlock();
  94	}
  95
  96	if (tk_len != expected_tk_len ||
  97	    (hltk_len && hltk_len != sizeof(pasn->hltk))) {
  98		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
  99			tk_len, hltk_len);
 100		goto out;
 101	}
 102
 103	if (!expected_tk_len && !hltk_len) {
 104		IWL_ERR(mvm, "TK and HLTK not set\n");
 105		goto out;
 106	}
 107
 108	memcpy(pasn->addr, addr, sizeof(pasn->addr));
 109
 110	if (hltk_len) {
 111		memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
 112		pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
 113	}
 114
 115	if (tk && tk_len)
 116		memcpy(pasn->tk, tk, sizeof(pasn->tk));
 117
 118	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
 119	return 0;
 120out:
 121	kfree(pasn);
 122	return -EINVAL;
 123}
 124
 125void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
 126{
 127	struct iwl_mvm_ftm_pasn_entry *entry, *prev;
 128
 129	lockdep_assert_held(&mvm->mutex);
 130
 131	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
 132				 list) {
 133		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
 134			continue;
 135
 136		list_del(&entry->list);
 137		kfree(entry);
 138		return;
 139	}
 140}
 141
 142static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
 143{
 144	struct iwl_mvm_loc_entry *e, *t;
 145
 146	mvm->ftm_initiator.req = NULL;
 147	mvm->ftm_initiator.req_wdev = NULL;
 148	memset(mvm->ftm_initiator.responses, 0,
 149	       sizeof(mvm->ftm_initiator.responses));
 150
 151	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
 152		list_del(&e->list);
 153		kfree(e);
 154	}
 155}
 156
 157void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
 158{
 159	struct cfg80211_pmsr_result result = {
 160		.status = NL80211_PMSR_STATUS_FAILURE,
 161		.final = 1,
 162		.host_time = ktime_get_boottime_ns(),
 163		.type = NL80211_PMSR_TYPE_FTM,
 164	};
 165	int i;
 166
 167	lockdep_assert_held(&mvm->mutex);
 168
 169	if (!mvm->ftm_initiator.req)
 170		return;
 171
 172	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
 173		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
 174		       ETH_ALEN);
 175		result.ftm.burst_index = mvm->ftm_initiator.responses[i];
 176
 177		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
 178				     mvm->ftm_initiator.req,
 179				     &result, GFP_KERNEL);
 180	}
 181
 182	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
 183			       mvm->ftm_initiator.req, GFP_KERNEL);
 184	iwl_mvm_ftm_reset(mvm);
 185}
 186
 187void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
 188{
 189	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
 190
 191	IWL_DEBUG_INFO(mvm,
 192		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
 193			IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
 194			IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
 195			IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
 196			IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
 197			IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
 198}
 199
 200void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
 201{
 202	struct iwl_mvm_smooth_entry *se, *st;
 203
 204	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
 205				 list) {
 206		list_del(&se->list);
 207		kfree(se);
 208	}
 209}
 210
 211static int
 212iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
 213{
 214	switch (s) {
 215	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
 216		return 0;
 217	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
 218		return -EBUSY;
 219	default:
 220		WARN_ON_ONCE(1);
 221		return -EIO;
 222	}
 223}
 224
 225static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 226			       struct iwl_tof_range_req_cmd_v5 *cmd,
 227			       struct cfg80211_pmsr_request *req)
 228{
 229	int i;
 230
 231	cmd->request_id = req->cookie;
 232	cmd->num_of_ap = req->n_peers;
 233
 234	/* use maximum for "no timeout" or bigger than what we can do */
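	/* (req->timeout is in milliseconds and this firmware field counts in
	 * units of 100 ms, as implied by the cap at 255 and the divide by
	 * 100 below) */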
 235	if (!req->timeout || req->timeout > 255 * 100)
 236		cmd->req_timeout = 255;
 237	else
 238		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
 239
  240	/*
  241	 * We always treat it as random; if it weren't, our local
  242	 * address would have been filled in there instead.
  243	 */
 244	cmd->macaddr_random = 1;
 245	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
 246	for (i = 0; i < ETH_ALEN; i++)
 247		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
 248
 249	if (vif->cfg.assoc)
 250		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
 251	else
 252		eth_broadcast_addr(cmd->range_req_bssid);
 253}
 254
 255static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
 256				   struct ieee80211_vif *vif,
 257				   struct iwl_tof_range_req_cmd_v9 *cmd,
 258				   struct cfg80211_pmsr_request *req)
 259{
 260	int i;
 261
 262	cmd->initiator_flags =
 263		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
 264			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
 265	cmd->request_id = req->cookie;
 266	cmd->num_of_ap = req->n_peers;
 267
 268	/*
 269	 * Use a large value for "no timeout". Don't use the maximum value
 270	 * because of fw limitations.
 271	 */
 272	if (req->timeout)
 273		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
 274	else
 275		cmd->req_timeout_ms = cpu_to_le32(0xfffff);
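		/* 0xfffff ms is roughly 17.5 minutes */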
 276
 277	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
 278	for (i = 0; i < ETH_ALEN; i++)
 279		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
 280
 281	if (vif->cfg.assoc) {
 282		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
 283
 284		/* AP's TSF is only relevant if associated */
 285		for (i = 0; i < req->n_peers; i++) {
 286			if (req->peers[i].report_ap_tsf) {
 287				struct iwl_mvm_vif *mvmvif =
 288					iwl_mvm_vif_from_mac80211(vif);
 289
 290				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
 291				return;
 292			}
 293		}
 294	} else {
 295		eth_broadcast_addr(cmd->range_req_bssid);
 296	}
 297
 298	/* Don't report AP's TSF */
 299	cmd->tsf_mac_id = cpu_to_le32(0xff);
 300}
 301
 302static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 303			       struct iwl_tof_range_req_cmd_v8 *cmd,
 304			       struct cfg80211_pmsr_request *req)
 305{
 306	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
 307}
 308
 309static int
 310iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
 311			      struct cfg80211_pmsr_request_peer *peer,
 312			      u8 *channel, u8 *bandwidth,
 313			      u8 *ctrl_ch_position)
 314{
 315	u32 freq = peer->chandef.chan->center_freq;
 316
 317	*channel = ieee80211_frequency_to_channel(freq);
 318
 319	switch (peer->chandef.width) {
 320	case NL80211_CHAN_WIDTH_20_NOHT:
 321		*bandwidth = IWL_TOF_BW_20_LEGACY;
 322		break;
 323	case NL80211_CHAN_WIDTH_20:
 324		*bandwidth = IWL_TOF_BW_20_HT;
 325		break;
 326	case NL80211_CHAN_WIDTH_40:
 327		*bandwidth = IWL_TOF_BW_40;
 328		break;
 329	case NL80211_CHAN_WIDTH_80:
 330		*bandwidth = IWL_TOF_BW_80;
 331		break;
 332	default:
 333		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
 334			peer->chandef.width);
 335		return -EINVAL;
 336	}
 337
 338	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
 339		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
 340
 341	return 0;
 342}
 343
 344static int
 345iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
 346			      struct cfg80211_pmsr_request_peer *peer,
 347			      u8 *channel, u8 *format_bw,
 348			      u8 *ctrl_ch_position)
 349{
 350	u32 freq = peer->chandef.chan->center_freq;
 351	u8 cmd_ver;
 352
 353	*channel = ieee80211_frequency_to_channel(freq);
 354
 355	switch (peer->chandef.width) {
 356	case NL80211_CHAN_WIDTH_20_NOHT:
 357		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
 358		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
 359		break;
 360	case NL80211_CHAN_WIDTH_20:
 361		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
 362		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
 363		break;
 364	case NL80211_CHAN_WIDTH_40:
 365		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
 366		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
 367		break;
 368	case NL80211_CHAN_WIDTH_80:
 369		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
 370		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
 371		break;
 372	case NL80211_CHAN_WIDTH_160:
 373		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
 374						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 375						IWL_FW_CMD_VER_UNKNOWN);
 376
 377		if (cmd_ver >= 13) {
 378			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
 379			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
 380			break;
 381		}
 382		fallthrough;
 383	default:
 384		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
 385			peer->chandef.width);
 386		return -EINVAL;
 387	}
 388
  389	/* non-EDCA based measurements must use HE preamble */
 390	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
 391		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
 392
 393	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
 394		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
 395
 396	return 0;
 397}
 398
 399static int
 400iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
 401			  struct cfg80211_pmsr_request_peer *peer,
 402			  struct iwl_tof_range_req_ap_entry_v2 *target)
 403{
 404	int ret;
 405
 406	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
 407					    &target->bandwidth,
 408					    &target->ctrl_ch_position);
 409	if (ret)
 410		return ret;
 411
 412	memcpy(target->bssid, peer->addr, ETH_ALEN);
 413	target->burst_period =
 414		cpu_to_le16(peer->ftm.burst_period);
 415	target->samples_per_burst = peer->ftm.ftms_per_burst;
 416	target->num_of_bursts = peer->ftm.num_bursts_exp;
 417	target->measure_type = 0; /* regular two-sided FTM */
 418	target->retries_per_sample = peer->ftm.ftmr_retries;
 419	target->asap_mode = peer->ftm.asap;
 420	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;
 421
 422	if (peer->ftm.request_lci)
 423		target->location_req |= IWL_TOF_LOC_LCI;
 424	if (peer->ftm.request_civicloc)
 425		target->location_req |= IWL_TOF_LOC_CIVIC;
 426
 427	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;
 428
 429	return 0;
 430}
 431
 432#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
 433				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
 434
 435static void
 436iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
 437			      struct cfg80211_pmsr_request_peer *peer,
 438			      struct iwl_tof_range_req_ap_entry_v6 *target)
 439{
 440	memcpy(target->bssid, peer->addr, ETH_ALEN);
 441	target->burst_period =
 442		cpu_to_le16(peer->ftm.burst_period);
 443	target->samples_per_burst = peer->ftm.ftms_per_burst;
 444	target->num_of_bursts = peer->ftm.num_bursts_exp;
 445	target->ftmr_max_retries = peer->ftm.ftmr_retries;
 446	target->initiator_ap_flags = cpu_to_le32(0);
 447
 448	if (peer->ftm.asap)
 449		FTM_PUT_FLAG(ASAP);
 450
 451	if (peer->ftm.request_lci)
 452		FTM_PUT_FLAG(LCI_REQUEST);
 453
 454	if (peer->ftm.request_civicloc)
 455		FTM_PUT_FLAG(CIVIC_REQUEST);
 456
 457	if (IWL_MVM_FTM_INITIATOR_DYNACK)
 458		FTM_PUT_FLAG(DYN_ACK);
 459
 460	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
 461		FTM_PUT_FLAG(ALGO_LR);
 462	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
 463		FTM_PUT_FLAG(ALGO_FFT);
 464
 465	if (peer->ftm.trigger_based)
 466		FTM_PUT_FLAG(TB);
 467	else if (peer->ftm.non_trigger_based)
 468		FTM_PUT_FLAG(NON_TB);
 469
 470	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
 471	    peer->ftm.lmr_feedback)
 472		FTM_PUT_FLAG(LMR_FEEDBACK);
 473}
 474
 475static int
 476iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
 477			  struct cfg80211_pmsr_request_peer *peer,
 478			  struct iwl_tof_range_req_ap_entry_v3 *target)
 479{
 480	int ret;
 481
 482	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
 483					    &target->bandwidth,
 484					    &target->ctrl_ch_position);
 485	if (ret)
 486		return ret;
 487
  488	/*
  489	 * Versions 3, 4 and 6 share the fields written by
  490	 * iwl_mvm_ftm_put_target_common(), so it can be used here too.
  491	 */
 492	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
 493
 494	return 0;
 495}
 496
 497static int
 498iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
 499			  struct cfg80211_pmsr_request_peer *peer,
 500			  struct iwl_tof_range_req_ap_entry_v4 *target)
 501{
 502	int ret;
 503
 504	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
 505					    &target->format_bw,
 506					    &target->ctrl_ch_position);
 507	if (ret)
 508		return ret;
 509
 510	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
 511
 512	return 0;
 513}
 514
 515static int
 516iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 517		       struct cfg80211_pmsr_request_peer *peer,
 518		       struct iwl_tof_range_req_ap_entry_v6 *target)
 519{
 520	int ret;
 521
 522	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
 523					    &target->format_bw,
 524					    &target->ctrl_ch_position);
 525	if (ret)
 526		return ret;
 527
 528	iwl_mvm_ftm_put_target_common(mvm, peer, target);
 529
 530	if (vif->cfg.assoc) {
 531		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 532		struct ieee80211_sta *sta;
 533		struct ieee80211_bss_conf *link_conf;
 534		unsigned int link_id;
 535
 536		rcu_read_lock();
 537		for_each_vif_active_link(vif, link_conf, link_id) {
 538			if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
 539				continue;
 540
 541			target->sta_id = mvmvif->link[link_id]->ap_sta_id;
 542			sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
 543			if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 544				rcu_read_unlock();
 545				return PTR_ERR_OR_ZERO(sta);
 546			}
 547
 548			if (sta->mfp && (peer->ftm.trigger_based ||
 549					 peer->ftm.non_trigger_based))
 550				FTM_PUT_FLAG(PMF);
 551			break;
 552		}
 553		rcu_read_unlock();
 554	} else {
 555		target->sta_id = IWL_MVM_INVALID_STA;
 556	}
 557
 558	/*
 559	 * TODO: Beacon interval is currently unknown, so use the common value
 560	 * of 100 TUs.
 561	 */
 562	target->beacon_interval = cpu_to_le16(100);
 563	return 0;
 564}
 565
 566static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
 567{
 568	u32 status;
 569	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
 570
 571	if (!err && status) {
 572		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
 573			status);
 574		err = iwl_ftm_range_request_status_to_err(status);
 575	}
 576
 577	return err;
 578}
 579
 580static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 581				struct cfg80211_pmsr_request *req)
 582{
 583	struct iwl_tof_range_req_cmd_v5 cmd_v5;
 584	struct iwl_host_cmd hcmd = {
 585		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 586		.dataflags[0] = IWL_HCMD_DFL_DUP,
 587		.data[0] = &cmd_v5,
 588		.len[0] = sizeof(cmd_v5),
 589	};
 590	u8 i;
 591	int err;
 592
 593	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
 594
 595	for (i = 0; i < cmd_v5.num_of_ap; i++) {
 596		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 597
 598		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
 599		if (err)
 600			return err;
 601	}
 602
 603	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 604}
 605
 606static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 607				struct cfg80211_pmsr_request *req)
 608{
 609	struct iwl_tof_range_req_cmd_v7 cmd_v7;
 610	struct iwl_host_cmd hcmd = {
 611		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 612		.dataflags[0] = IWL_HCMD_DFL_DUP,
 613		.data[0] = &cmd_v7,
 614		.len[0] = sizeof(cmd_v7),
 615	};
 616	u8 i;
 617	int err;
 618
  619	/*
  620	 * Versions 7 and 8 have the same structure except for the responders
  621	 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
  622	 */
 623	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
 624
 625	for (i = 0; i < cmd_v7.num_of_ap; i++) {
 626		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 627
 628		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
 629		if (err)
 630			return err;
 631	}
 632
 633	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 634}
 635
 636static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 637				struct cfg80211_pmsr_request *req)
 638{
 639	struct iwl_tof_range_req_cmd_v8 cmd;
 640	struct iwl_host_cmd hcmd = {
 641		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 642		.dataflags[0] = IWL_HCMD_DFL_DUP,
 643		.data[0] = &cmd,
 644		.len[0] = sizeof(cmd),
 645	};
 646	u8 i;
 647	int err;
 648
 649	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
 650
 651	for (i = 0; i < cmd.num_of_ap; i++) {
 652		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 653
 654		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
 655		if (err)
 656			return err;
 657	}
 658
 659	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 660}
 661
 662static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 663				struct cfg80211_pmsr_request *req)
 664{
 665	struct iwl_tof_range_req_cmd_v9 cmd;
 666	struct iwl_host_cmd hcmd = {
 667		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 668		.dataflags[0] = IWL_HCMD_DFL_DUP,
 669		.data[0] = &cmd,
 670		.len[0] = sizeof(cmd),
 671	};
 672	u8 i;
 673	int err;
 674
 675	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
 676
 677	for (i = 0; i < cmd.num_of_ap; i++) {
 678		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 679		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
 680
 681		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
 682		if (err)
 683			return err;
 684	}
 685
 686	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 687}
 688
 689static void iter(struct ieee80211_hw *hw,
 690		 struct ieee80211_vif *vif,
 691		 struct ieee80211_sta *sta,
 692		 struct ieee80211_key_conf *key,
 693		 void *data)
 694{
 695	struct iwl_tof_range_req_ap_entry_v6 *target = data;
 696
 697	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
 698		return;
 699
 700	WARN_ON(!sta->mfp);
 701
 702	if (WARN_ON(key->keylen > sizeof(target->tk)))
 703		return;
 704
 705	memcpy(target->tk, key->key, key->keylen);
 706	target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
 707	WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
 708}
 709
 710static void
 711iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 712				struct iwl_tof_range_req_ap_entry_v7 *target)
 713{
 714	struct iwl_mvm_ftm_pasn_entry *entry;
 715	u32 flags = le32_to_cpu(target->initiator_ap_flags);
 716
 717	if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
 718		       IWL_INITIATOR_AP_FLAGS_TB)))
 719		return;
 720
 721	lockdep_assert_held(&mvm->mutex);
 722
 723	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
 724		if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
 725			continue;
 726
 727		target->cipher = entry->cipher;
 728
 729		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
 730			memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
 731		else
 732			memset(target->hltk, 0, sizeof(target->hltk));
 733
 734		if (vif->cfg.assoc &&
 735		    !memcmp(vif->bss_conf.bssid, target->bssid,
 736			    sizeof(target->bssid)))
 737			ieee80211_iter_keys(mvm->hw, vif, iter, target);
 738		else
 739			memcpy(target->tk, entry->tk, sizeof(target->tk));
 740
 741		memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
 742		memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
 743
 744		target->initiator_ap_flags |=
 745			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
 746		return;
 747	}
 748}
 749
 750static int
 751iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 752			  struct cfg80211_pmsr_request_peer *peer,
 753			  struct iwl_tof_range_req_ap_entry_v7 *target)
 754{
 755	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
 756	if (err)
 757		return err;
 758
 759	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
 760	return err;
 761}
 762
 763static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
 764				 struct ieee80211_vif *vif,
 765				 struct cfg80211_pmsr_request *req)
 766{
 767	struct iwl_tof_range_req_cmd_v11 cmd;
 768	struct iwl_host_cmd hcmd = {
 769		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 770		.dataflags[0] = IWL_HCMD_DFL_DUP,
 771		.data[0] = &cmd,
 772		.len[0] = sizeof(cmd),
 773	};
 774	u8 i;
 775	int err;
 776
 777	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
 778
 779	for (i = 0; i < cmd.num_of_ap; i++) {
 780		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 781		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
 782
 783		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
 784		if (err)
 785			return err;
 786	}
 787
 788	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 789}
 790
 791static void
 792iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
 793			   struct iwl_tof_range_req_ap_entry_v8 *target)
 794{
 795	/* Only 2 STS are supported on Tx */
 796	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
 797		IWL_MVM_FTM_I2R_MAX_STS;
 798
 799	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
 800		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
 801	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
 802		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
 803	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
 804	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
 805}
 806
 807static int
 808iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 809			  struct cfg80211_pmsr_request_peer *peer,
 810			  struct iwl_tof_range_req_ap_entry_v8 *target)
 811{
 812	u32 flags;
 813	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
 814
 815	if (ret)
 816		return ret;
 817
 818	iwl_mvm_ftm_set_ndp_params(mvm, target);
 819
 820	/*
 821	 * If secure LTF is turned off, replace the flag with PMF only
 822	 */
 823	flags = le32_to_cpu(target->initiator_ap_flags);
 824	if ((flags & IWL_INITIATOR_AP_FLAGS_SECURED) &&
 825	    !IWL_MVM_FTM_INITIATOR_SECURE_LTF) {
 826		flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
 827		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
 828		target->initiator_ap_flags = cpu_to_le32(flags);
 829	}
 830
 831	return 0;
 832}
 833
 834static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
 835				 struct ieee80211_vif *vif,
 836				 struct cfg80211_pmsr_request *req)
 837{
 838	struct iwl_tof_range_req_cmd_v12 cmd;
 839	struct iwl_host_cmd hcmd = {
 840		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 841		.dataflags[0] = IWL_HCMD_DFL_DUP,
 842		.data[0] = &cmd,
 843		.len[0] = sizeof(cmd),
 844	};
 845	u8 i;
 846	int err;
 847
 848	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
 849
 850	for (i = 0; i < cmd.num_of_ap; i++) {
 851		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 852		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
 853
 854		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
 855		if (err)
 856			return err;
 857	}
 858
 859	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 860}
 861
 862static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
 863				 struct ieee80211_vif *vif,
 864				 struct cfg80211_pmsr_request *req)
 865{
 866	struct iwl_tof_range_req_cmd_v13 cmd;
 867	struct iwl_host_cmd hcmd = {
 868		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 869		.dataflags[0] = IWL_HCMD_DFL_DUP,
 870		.data[0] = &cmd,
 871		.len[0] = sizeof(cmd),
 872	};
 873	u8 i;
 874	int err;
 875
 876	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
 877
 878	for (i = 0; i < cmd.num_of_ap; i++) {
 879		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 880		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];
 881
 882		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
 883		if (err)
 884			return err;
 885
 886		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
 887			target->bss_color = peer->ftm.bss_color;
 888
 889		if (peer->ftm.non_trigger_based) {
 890			target->min_time_between_msr =
 891				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
 892			target->burst_period =
 893				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
 894		} else {
 895			target->min_time_between_msr = cpu_to_le16(0);
 896		}
 897
 898		target->band =
 899			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
 900	}
 901
 902	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 903}
 904
 905int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 906		      struct cfg80211_pmsr_request *req)
 907{
 908	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
 909				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
 910	int err;
 911
 912	lockdep_assert_held(&mvm->mutex);
 913
 914	if (mvm->ftm_initiator.req)
 915		return -EBUSY;
 916
 917	if (new_api) {
 918		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
 919						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 920						   IWL_FW_CMD_VER_UNKNOWN);
 921
 922		switch (cmd_ver) {
 923		case 13:
 924			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
 925			break;
 926		case 12:
 927			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
 928			break;
 929		case 11:
 930			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
 931			break;
 932		case 9:
 933		case 10:
 934			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
 935			break;
 936		case 8:
 937			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
 938			break;
 939		default:
 940			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
 941			break;
 942		}
 943	} else {
 944		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
 945	}
 946
 947	if (!err) {
 948		mvm->ftm_initiator.req = req;
 949		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
 950	}
 951
 952	return err;
 953}
 954
 955void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
 956{
 957	struct iwl_tof_range_abort_cmd cmd = {
 958		.request_id = req->cookie,
 959	};
 960
 961	lockdep_assert_held(&mvm->mutex);
 962
 963	if (req != mvm->ftm_initiator.req)
 964		return;
 965
 966	iwl_mvm_ftm_reset(mvm);
 967
 968	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
 969				 0, sizeof(cmd), &cmd))
 970		IWL_ERR(mvm, "failed to abort FTM process\n");
 971}
 972
 973static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
 974				 const u8 *addr)
 975{
 976	int i;
 977
 978	for (i = 0; i < req->n_peers; i++) {
 979		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 980
 981		if (ether_addr_equal_unaligned(peer->addr, addr))
 982			return i;
 983	}
 984
 985	return -ENOENT;
 986}
 987
 988static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
 989{
 990	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
 991	u32 curr_gp2, diff;
 992	u64 now_from_boot_ns;
 993
 994	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
 995			      &now_from_boot_ns, NULL);
 996
 997	if (curr_gp2 >= gp2_ts)
 998		diff = curr_gp2 - gp2_ts;
 999	else
1000		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);
1001
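	/*
	 * now_from_boot_ns is in nanoseconds while the GP2 timestamps appear
	 * to be microsecond counts, hence the scaling by 1000 below.
	 */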
1002	return now_from_boot_ns - (u64)diff * 1000;
1003}
1004
1005static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
1006				      struct cfg80211_pmsr_result *res)
1007{
1008	struct iwl_mvm_loc_entry *entry;
1009
1010	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
1011		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
1012			continue;
1013
1014		if (entry->lci_len) {
1015			res->ftm.lci_len = entry->lci_len;
1016			res->ftm.lci = entry->buf;
1017		}
1018
1019		if (entry->civic_len) {
1020			res->ftm.civicloc_len = entry->civic_len;
1021			res->ftm.civicloc = entry->buf + entry->lci_len;
1022		}
1023
1024		/* we found the entry we needed */
1025		break;
1026	}
1027}
1028
1029static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
1030					u8 num_of_aps)
1031{
1032	lockdep_assert_held(&mvm->mutex);
1033
1034	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
1035		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
1036			request_id, (u8)mvm->ftm_initiator.req->cookie);
1037		return -EINVAL;
1038	}
1039
1040	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
1041		IWL_ERR(mvm, "FTM range response invalid\n");
1042		return -EINVAL;
1043	}
1044
1045	return 0;
1046}
1047
1048static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
1049				      struct cfg80211_pmsr_result *res)
1050{
1051	struct iwl_mvm_smooth_entry *resp = NULL, *iter;
1052	s64 rtt_avg, rtt = res->ftm.rtt_avg;
1053	u32 undershoot, overshoot;
1054	u8 alpha;
1055
1056	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
1057		return;
1058
1059	WARN_ON(rtt < 0);
1060
1061	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
1062		IWL_DEBUG_INFO(mvm,
1063			       ": %pM: ignore failed measurement. Status=%u\n",
1064			       res->addr, res->status);
1065		return;
1066	}
1067
1068	list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
1069		if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
1070			resp = iter;
1071			break;
1072		}
1073	}
1074
1075	if (!resp) {
1076		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1077		if (!resp)
1078			return;
1079
1080		memcpy(resp->addr, res->addr, ETH_ALEN);
1081		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
1082
1083		resp->rtt_avg = rtt;
1084
1085		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
1086			       resp->addr, resp->rtt_avg);
1087		goto update_time;
1088	}
1089
1090	if (res->host_time - resp->host_time >
1091	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
1092		resp->rtt_avg = rtt;
1093
1094		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
1095			       resp->addr, resp->rtt_avg);
1096		goto update_time;
1097	}
1098
1099	/* Smooth the results based on the tracked RTT average */
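	/*
	 * This is an exponential moving average; with illustrative numbers
	 * only (the real weight is IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA):
	 * alpha = 40, a tracked average of 10000 and a new sample of 13000
	 * give a new average of (40 * 13000 + 60 * 10000) / 100 = 11200.
	 */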
1100	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
1101	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
1102	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
1103
1104	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
1105
1106	IWL_DEBUG_INFO(mvm,
1107		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
1108		       resp->addr, resp->rtt_avg, rtt_avg, rtt);
1109
1110	/*
1111	 * update the responder's average RTT results regardless of
1112	 * the under/over shoot logic below
1113	 */
1114	resp->rtt_avg = rtt_avg;
1115
1116	/* smooth the results */
1117	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
1118		res->ftm.rtt_avg = rtt_avg;
1119
1120		IWL_DEBUG_INFO(mvm,
1121			       "undershoot: val=%lld\n",
1122			       (rtt_avg - rtt));
1123	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
1124		   overshoot) {
1125		res->ftm.rtt_avg = rtt_avg;
1126		IWL_DEBUG_INFO(mvm,
1127			       "overshoot: val=%lld\n",
1128			       (rtt - rtt_avg));
1129	}
1130
1131update_time:
1132	resp->host_time = res->host_time;
1133}
1134
1135static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
1136				     struct cfg80211_pmsr_result *res)
1137{
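	/*
	 * The RTT reported by the firmware appears to be in picoseconds:
	 * 1 cm of distance adds ~66.7 ps of round-trip time, so
	 * rtt * 100 / 6666 approximates the distance in centimeters.
	 */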
1138	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
1139
1140	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
1141	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
1142	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
1143	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
1144	IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
1145	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
1146	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
1147	IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
1148	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
1149	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
1150	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
1151	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
1152}
1153
1154static void
1155iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
1156			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
1157{
1158	struct iwl_mvm_ftm_pasn_entry *entry;
1159
1160	lockdep_assert_held(&mvm->mutex);
1161
1162	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
1163		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
1164			continue;
1165
1166		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
1167		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
1168		return;
1169	}
1170}
1171
1172static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
1173{
1174	if (!fw_has_api(&mvm->fw->ucode_capa,
1175			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
1176		return 5;
1177
1178	/* Starting from version 8, the FW advertises the version */
1179	if (mvm->cmd_ver.range_resp >= 8)
1180		return mvm->cmd_ver.range_resp;
1181	else if (fw_has_api(&mvm->fw->ucode_capa,
1182			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1183		return 7;
1184
1185	/* The first version of the new range request API */
1186	return 6;
1187}
1188
1189static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
1190{
1191	switch (ver) {
1192	case 9:
1193	case 8:
1194		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
1195	case 7:
1196		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
1197	case 6:
1198		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
1199	case 5:
1200		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
1201	default:
1202		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
1203		return false;
1204	}
1205}
1206
1207void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1208{
1209	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1210	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
1211	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
1212	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
1213	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
1214	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
1215	int i;
1216	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
1217				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
1218	u8 num_of_aps, last_in_batch;
1219	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);
1220
1221	lockdep_assert_held(&mvm->mutex);
1222
1223	if (!mvm->ftm_initiator.req) {
1224		return;
1225	}
1226
1227	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
1228		return;
1229
1230	if (new_api) {
1231		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
1232						 fw_resp_v8->num_of_aps))
1233			return;
1234
1235		num_of_aps = fw_resp_v8->num_of_aps;
1236		last_in_batch = fw_resp_v8->last_report;
1237	} else {
1238		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
1239						 fw_resp_v5->num_of_aps))
1240			return;
1241
1242		num_of_aps = fw_resp_v5->num_of_aps;
1243		last_in_batch = fw_resp_v5->last_in_batch;
1244	}
1245
1246	IWL_DEBUG_INFO(mvm, "Range response received\n");
1247	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
1248		       mvm->ftm_initiator.req->cookie, num_of_aps);
1249
1250	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
1251		struct cfg80211_pmsr_result result = {};
1252		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
1253		int peer_idx;
1254
1255		if (new_api) {
1256			if (notif_ver >= 8) {
1257				fw_ap = &fw_resp_v8->ap[i];
1258				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
1259			} else if (notif_ver == 7) {
1260				fw_ap = (void *)&fw_resp_v7->ap[i];
1261			} else {
1262				fw_ap = (void *)&fw_resp_v6->ap[i];
1263			}
1264
1265			result.final = fw_ap->last_burst;
1266			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
1267			result.ap_tsf_valid = 1;
1268		} else {
1269			/* the first part is the same for old and new APIs */
1270			fw_ap = (void *)&fw_resp_v5->ap[i];
1271			/*
1272			 * FIXME: the firmware needs to report this, we don't
1273			 * even know the number of bursts the responder picked
1274			 * (if we asked it to)
1275			 */
1276			result.final = 0;
1277		}
1278
1279		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
1280						 fw_ap->bssid);
1281		if (peer_idx < 0) {
1282			IWL_WARN(mvm,
1283				 "Unknown address (%pM, target #%d) in FTM response\n",
1284				 fw_ap->bssid, i);
1285			continue;
1286		}
1287
1288		switch (fw_ap->measure_status) {
1289		case IWL_TOF_ENTRY_SUCCESS:
1290			result.status = NL80211_PMSR_STATUS_SUCCESS;
1291			break;
1292		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
1293			result.status = NL80211_PMSR_STATUS_TIMEOUT;
1294			break;
1295		case IWL_TOF_ENTRY_NO_RESPONSE:
1296			result.status = NL80211_PMSR_STATUS_FAILURE;
1297			result.ftm.failure_reason =
1298				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
1299			break;
1300		case IWL_TOF_ENTRY_REQUEST_REJECTED:
1301			result.status = NL80211_PMSR_STATUS_FAILURE;
1302			result.ftm.failure_reason =
1303				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
1304			result.ftm.busy_retry_time = fw_ap->refusal_period;
1305			break;
1306		default:
1307			result.status = NL80211_PMSR_STATUS_FAILURE;
1308			result.ftm.failure_reason =
1309				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
1310			break;
1311		}
1312		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
1313		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
1314							     fw_ap->timestamp);
1315		result.type = NL80211_PMSR_TYPE_FTM;
1316		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
1317		mvm->ftm_initiator.responses[peer_idx]++;
1318		result.ftm.rssi_avg = fw_ap->rssi;
1319		result.ftm.rssi_avg_valid = 1;
1320		result.ftm.rssi_spread = fw_ap->rssi_spread;
1321		result.ftm.rssi_spread_valid = 1;
1322		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
1323		result.ftm.rtt_avg_valid = 1;
1324		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
1325		result.ftm.rtt_variance_valid = 1;
1326		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
1327		result.ftm.rtt_spread_valid = 1;
1328
1329		iwl_mvm_ftm_get_lci_civic(mvm, &result);
1330
1331		iwl_mvm_ftm_rtt_smoothing(mvm, &result);
1332
1333		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
1334				     mvm->ftm_initiator.req,
1335				     &result, GFP_KERNEL);
1336
1337		if (fw_has_api(&mvm->fw->ucode_capa,
1338			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1339			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
1340				       fw_ap->rttConfidence);
1341
1342		iwl_mvm_debug_range_resp(mvm, i, &result);
1343	}
1344
1345	if (last_in_batch) {
1346		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
1347				       mvm->ftm_initiator.req,
1348				       GFP_KERNEL);
1349		iwl_mvm_ftm_reset(mvm);
1350	}
1351}
1352
1353void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1354{
1355	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1356	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
1357	size_t len = iwl_rx_packet_payload_len(pkt);
1358	struct iwl_mvm_loc_entry *entry;
1359	const u8 *ies, *lci, *civic, *msr_ie;
1360	size_t ies_len, lci_len = 0, civic_len = 0;
1361	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
1362			 sizeof(mgmt->u.action.u.ftm);
1363	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
1364	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;
1365
1366	if (len <= baselen)
1367		return;
1368
1369	lockdep_assert_held(&mvm->mutex);
1370
1371	ies = mgmt->u.action.u.ftm.variable;
1372	ies_len = len - baselen;
1373
1374	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1375					&rprt_type_lci, 1, 4);
1376	if (msr_ie) {
1377		lci = msr_ie + 2;
1378		lci_len = msr_ie[1];
1379	}
1380
1381	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1382					&rprt_type_civic, 1, 4);
1383	if (msr_ie) {
1384		civic = msr_ie + 2;
1385		civic_len = msr_ie[1];
1386	}
1387
1388	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
1389	if (!entry)
1390		return;
1391
1392	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);
1393
1394	entry->lci_len = lci_len;
1395	if (lci_len)
1396		memcpy(entry->buf, lci, lci_len);
1397
1398	entry->civic_len = civic_len;
1399	if (civic_len)
1400		memcpy(entry->buf + lci_len, civic, civic_len);
1401
1402	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
1403}
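
For reference, the RTT smoothing and the RTT-to-distance conversion used above can be modelled in isolation. The sketch below is a minimal user-space model, not driver code: the SMOOTH_* constants are hypothetical stand-ins for the IWL_MVM_FTM_INITIATOR_SMOOTH_* values defined in constants.h, and treating the RTT values as picoseconds is an assumption inferred from the 6666 divisor in iwl_mvm_debug_range_resp().

#include <stdint.h>
#include <stdio.h>

#define SMOOTH_ALPHA		40	/* hypothetical: weight of the new sample, in percent */
#define SMOOTH_UNDERSHOOT	500	/* hypothetical threshold, in RTT units */
#define SMOOTH_OVERSHOOT	500	/* hypothetical threshold, in RTT units */

/* Update the tracked average and pick the value to report, mirroring the
 * logic of iwl_mvm_ftm_rtt_smoothing() above. */
static int64_t smooth_rtt(int64_t *tracked_avg, int64_t rtt)
{
	int64_t avg = (SMOOTH_ALPHA * rtt +
		       (100 - SMOOTH_ALPHA) * *tracked_avg) / 100;
	int64_t reported = rtt;

	/* only replace the raw sample when it strays too far from the average */
	if ((avg > rtt && avg - rtt > SMOOTH_UNDERSHOOT) ||
	    (avg < rtt && rtt - avg > SMOOTH_OVERSHOOT))
		reported = avg;

	*tracked_avg = avg;
	return reported;
}

int main(void)
{
	int64_t avg = 10000;	/* previously tracked average */
	int64_t samples[] = { 10200, 9800, 13000, 10100 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int64_t rtt = smooth_rtt(&avg, samples[i]);

		/* same conversion as iwl_mvm_debug_range_resp() */
		printf("sample=%lld reported=%lld avg=%lld distance=%lldcm\n",
		       (long long)samples[i], (long long)rtt,
		       (long long)avg, (long long)(rtt * 100 / 6666));
	}
	return 0;
}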
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
   2/*
   3 * Copyright (C) 2015-2017 Intel Deutschland GmbH
   4 * Copyright (C) 2018-2023 Intel Corporation
   5 */
   6#include <linux/etherdevice.h>
   7#include <linux/math64.h>
   8#include <net/cfg80211.h>
   9#include "mvm.h"
  10#include "iwl-io.h"
  11#include "iwl-prph.h"
  12#include "constants.h"
  13
  14struct iwl_mvm_loc_entry {
  15	struct list_head list;
  16	u8 addr[ETH_ALEN];
  17	u8 lci_len, civic_len;
  18	u8 buf[];
  19};
  20
  21struct iwl_mvm_smooth_entry {
  22	struct list_head list;
  23	u8 addr[ETH_ALEN];
  24	s64 rtt_avg;
  25	u64 host_time;
  26};
  27
  28enum iwl_mvm_pasn_flags {
  29	IWL_MVM_PASN_FLAG_HAS_HLTK = BIT(0),
  30};
  31
  32struct iwl_mvm_ftm_pasn_entry {
  33	struct list_head list;
  34	u8 addr[ETH_ALEN];
  35	u8 hltk[HLTK_11AZ_LEN];
  36	u8 tk[TK_11AZ_LEN];
  37	u8 cipher;
  38	u8 tx_pn[IEEE80211_CCMP_PN_LEN];
  39	u8 rx_pn[IEEE80211_CCMP_PN_LEN];
  40	u32 flags;
  41};
  42
  43int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
  44			     u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
  45			     u8 *hltk, u32 hltk_len)
  46{
  47	struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
  48						      GFP_KERNEL);
  49	u32 expected_tk_len;
  50
  51	lockdep_assert_held(&mvm->mutex);
  52
  53	if (!pasn)
  54		return -ENOBUFS;
  55
  56	iwl_mvm_ftm_remove_pasn_sta(mvm, addr);
  57
  58	pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
  59
  60	switch (pasn->cipher) {
  61	case IWL_LOCATION_CIPHER_CCMP_128:
  62	case IWL_LOCATION_CIPHER_GCMP_128:
  63		expected_tk_len = WLAN_KEY_LEN_CCMP;
  64		break;
  65	case IWL_LOCATION_CIPHER_GCMP_256:
  66		expected_tk_len = WLAN_KEY_LEN_GCMP_256;
  67		break;
  68	default:
  69		goto out;
  70	}
  71
  72	/*
  73	 * If associated to this AP and already have security context,
  74	 * the TK is already configured for this station, so it
  75	 * shouldn't be set again here.
  76	 */
  77	if (vif->cfg.assoc) {
  78		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
  79		struct ieee80211_bss_conf *link_conf;
  80		unsigned int link_id;
  81		struct ieee80211_sta *sta;
  82		u8 sta_id;
  83
  84		rcu_read_lock();
  85		for_each_vif_active_link(vif, link_conf, link_id) {
  86			if (memcmp(addr, link_conf->bssid, ETH_ALEN))
  87				continue;
  88
  89			sta_id = mvmvif->link[link_id]->ap_sta_id;
  90			sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
  91			if (!IS_ERR_OR_NULL(sta) && sta->mfp)
  92				expected_tk_len = 0;
  93			break;
  94		}
  95		rcu_read_unlock();
  96	}
  97
  98	if (tk_len != expected_tk_len ||
  99	    (hltk_len && hltk_len != sizeof(pasn->hltk))) {
 100		IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
 101			tk_len, hltk_len);
 102		goto out;
 103	}
 104
 105	if (!expected_tk_len && !hltk_len) {
 106		IWL_ERR(mvm, "TK and HLTK not set\n");
 107		goto out;
 108	}
 109
 110	memcpy(pasn->addr, addr, sizeof(pasn->addr));
 111
 112	if (hltk_len) {
 113		memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
 114		pasn->flags |= IWL_MVM_PASN_FLAG_HAS_HLTK;
 115	}
 116
 117	if (tk && tk_len)
 118		memcpy(pasn->tk, tk, sizeof(pasn->tk));
 119
 120	list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
 121	return 0;
 122out:
 123	kfree(pasn);
 124	return -EINVAL;
 125}
 126
 127void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
 128{
 129	struct iwl_mvm_ftm_pasn_entry *entry, *prev;
 130
 131	lockdep_assert_held(&mvm->mutex);
 132
 133	list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
 134				 list) {
 135		if (memcmp(entry->addr, addr, sizeof(entry->addr)))
 136			continue;
 137
 138		list_del(&entry->list);
 139		kfree(entry);
 140		return;
 141	}
 142}
 143
 144static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
 145{
 146	struct iwl_mvm_loc_entry *e, *t;
 147
 148	mvm->ftm_initiator.req = NULL;
 149	mvm->ftm_initiator.req_wdev = NULL;
 150	memset(mvm->ftm_initiator.responses, 0,
 151	       sizeof(mvm->ftm_initiator.responses));
 152
 153	list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
 154		list_del(&e->list);
 155		kfree(e);
 156	}
 157}
 158
 159void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
 160{
 161	struct cfg80211_pmsr_result result = {
 162		.status = NL80211_PMSR_STATUS_FAILURE,
 163		.final = 1,
 164		.host_time = ktime_get_boottime_ns(),
 165		.type = NL80211_PMSR_TYPE_FTM,
 166	};
 167	int i;
 168
 169	lockdep_assert_held(&mvm->mutex);
 170
 171	if (!mvm->ftm_initiator.req)
 172		return;
 173
 174	for (i = 0; i < mvm->ftm_initiator.req->n_peers; i++) {
 175		memcpy(result.addr, mvm->ftm_initiator.req->peers[i].addr,
 176		       ETH_ALEN);
 177		result.ftm.burst_index = mvm->ftm_initiator.responses[i];
 178
 179		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
 180				     mvm->ftm_initiator.req,
 181				     &result, GFP_KERNEL);
 182	}
 183
 184	cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
 185			       mvm->ftm_initiator.req, GFP_KERNEL);
 186	iwl_mvm_ftm_reset(mvm);
 187}
 188
 189void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
 190{
 191	INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
 192
 193	IWL_DEBUG_INFO(mvm,
 194		       "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
 195			IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
 196			IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
 197			IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
 198			IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
 199			IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
 200}
 201
 202void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
 203{
 204	struct iwl_mvm_smooth_entry *se, *st;
 205
 206	list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
 207				 list) {
 208		list_del(&se->list);
 209		kfree(se);
 210	}
 211}
 212
 213static int
 214iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
 215{
 216	switch (s) {
 217	case IWL_TOF_RANGE_REQUEST_STATUS_SUCCESS:
 218		return 0;
 219	case IWL_TOF_RANGE_REQUEST_STATUS_BUSY:
 220		return -EBUSY;
 221	default:
 222		WARN_ON_ONCE(1);
 223		return -EIO;
 224	}
 225}
 226
 227static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 228			       struct iwl_tof_range_req_cmd_v5 *cmd,
 229			       struct cfg80211_pmsr_request *req)
 230{
 231	int i;
 232
 233	cmd->request_id = req->cookie;
 234	cmd->num_of_ap = req->n_peers;
 235
 236	/* use maximum for "no timeout" or bigger than what we can do */
 237	if (!req->timeout || req->timeout > 255 * 100)
 238		cmd->req_timeout = 255;
 239	else
 240		cmd->req_timeout = DIV_ROUND_UP(req->timeout, 100);
 241
 242	/*
 243	 * We treat it always as random, since if not we'll
 244	 * have filled our local address there instead.
 245	 */
 246	cmd->macaddr_random = 1;
 247	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
 248	for (i = 0; i < ETH_ALEN; i++)
 249		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
 250
 251	if (vif->cfg.assoc)
 252		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
 253	else
 254		eth_broadcast_addr(cmd->range_req_bssid);
 255}
 256
 257static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
 258				   struct ieee80211_vif *vif,
 259				   struct iwl_tof_range_req_cmd_v9 *cmd,
 260				   struct cfg80211_pmsr_request *req)
 261{
 262	int i;
 263
 264	cmd->initiator_flags =
 265		cpu_to_le32(IWL_TOF_INITIATOR_FLAGS_MACADDR_RANDOM |
 266			    IWL_TOF_INITIATOR_FLAGS_NON_ASAP_SUPPORT);
 267	cmd->request_id = req->cookie;
 268	cmd->num_of_ap = req->n_peers;
 269
 270	/*
 271	 * Use a large value for "no timeout". Don't use the maximum value
 272	 * because of fw limitations.
 273	 */
 274	if (req->timeout)
 275		cmd->req_timeout_ms = cpu_to_le32(req->timeout);
 276	else
 277		cmd->req_timeout_ms = cpu_to_le32(0xfffff);
 278
 279	memcpy(cmd->macaddr_template, req->mac_addr, ETH_ALEN);
 280	for (i = 0; i < ETH_ALEN; i++)
 281		cmd->macaddr_mask[i] = ~req->mac_addr_mask[i];
 282
 283	if (vif->cfg.assoc) {
 284		memcpy(cmd->range_req_bssid, vif->bss_conf.bssid, ETH_ALEN);
 285
 286		/* AP's TSF is only relevant if associated */
 287		for (i = 0; i < req->n_peers; i++) {
 288			if (req->peers[i].report_ap_tsf) {
 289				struct iwl_mvm_vif *mvmvif =
 290					iwl_mvm_vif_from_mac80211(vif);
 291
 292				cmd->tsf_mac_id = cpu_to_le32(mvmvif->id);
 293				return;
 294			}
 295		}
 296	} else {
 297		eth_broadcast_addr(cmd->range_req_bssid);
 298	}
 299
 300	/* Don't report AP's TSF */
 301	cmd->tsf_mac_id = cpu_to_le32(0xff);
 302}
 303
 304static void iwl_mvm_ftm_cmd_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 305			       struct iwl_tof_range_req_cmd_v8 *cmd,
 306			       struct cfg80211_pmsr_request *req)
 307{
 308	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)cmd, req);
 309}
 310
 311static int
 312iwl_mvm_ftm_target_chandef_v1(struct iwl_mvm *mvm,
 313			      struct cfg80211_pmsr_request_peer *peer,
 314			      u8 *channel, u8 *bandwidth,
 315			      u8 *ctrl_ch_position)
 316{
 317	u32 freq = peer->chandef.chan->center_freq;
 318
 319	*channel = ieee80211_frequency_to_channel(freq);
 320
 321	switch (peer->chandef.width) {
 322	case NL80211_CHAN_WIDTH_20_NOHT:
 323		*bandwidth = IWL_TOF_BW_20_LEGACY;
 324		break;
 325	case NL80211_CHAN_WIDTH_20:
 326		*bandwidth = IWL_TOF_BW_20_HT;
 327		break;
 328	case NL80211_CHAN_WIDTH_40:
 329		*bandwidth = IWL_TOF_BW_40;
 330		break;
 331	case NL80211_CHAN_WIDTH_80:
 332		*bandwidth = IWL_TOF_BW_80;
 333		break;
 334	default:
 335		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
 336			peer->chandef.width);
 337		return -EINVAL;
 338	}
 339
 340	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
 341		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
 342
 343	return 0;
 344}
 345
 346static int
 347iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
 348			      struct cfg80211_pmsr_request_peer *peer,
 349			      u8 *channel, u8 *format_bw,
 350			      u8 *ctrl_ch_position)
 351{
 352	u32 freq = peer->chandef.chan->center_freq;
 353	u8 cmd_ver;
 354
 355	*channel = ieee80211_frequency_to_channel(freq);
 356
 357	switch (peer->chandef.width) {
 358	case NL80211_CHAN_WIDTH_20_NOHT:
 359		*format_bw = IWL_LOCATION_FRAME_FORMAT_LEGACY;
 360		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
 361		break;
 362	case NL80211_CHAN_WIDTH_20:
 363		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
 364		*format_bw |= IWL_LOCATION_BW_20MHZ << LOCATION_BW_POS;
 365		break;
 366	case NL80211_CHAN_WIDTH_40:
 367		*format_bw = IWL_LOCATION_FRAME_FORMAT_HT;
 368		*format_bw |= IWL_LOCATION_BW_40MHZ << LOCATION_BW_POS;
 369		break;
 370	case NL80211_CHAN_WIDTH_80:
 371		*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
 372		*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
 373		break;
 374	case NL80211_CHAN_WIDTH_160:
 375		cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
 376						WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 377						IWL_FW_CMD_VER_UNKNOWN);
 378
 379		if (cmd_ver >= 13) {
 380			*format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
 381			*format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
 382			break;
 383		}
 384		fallthrough;
 385	default:
 386		IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
 387			peer->chandef.width);
 388		return -EINVAL;
 389	}
 390
 391	/* non EDCA based measurement must use HE preamble */
 392	if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
 393		*format_bw |= IWL_LOCATION_FRAME_FORMAT_HE;
 394
 395	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
 396		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
 397
 398	return 0;
 399}
 400
 401static int
 402iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
 403			  struct cfg80211_pmsr_request_peer *peer,
 404			  struct iwl_tof_range_req_ap_entry_v2 *target)
 405{
 406	int ret;
 407
 408	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
 409					    &target->bandwidth,
 410					    &target->ctrl_ch_position);
 411	if (ret)
 412		return ret;
 413
 414	memcpy(target->bssid, peer->addr, ETH_ALEN);
 415	target->burst_period =
 416		cpu_to_le16(peer->ftm.burst_period);
 417	target->samples_per_burst = peer->ftm.ftms_per_burst;
 418	target->num_of_bursts = peer->ftm.num_bursts_exp;
 419	target->measure_type = 0; /* regular two-sided FTM */
 420	target->retries_per_sample = peer->ftm.ftmr_retries;
 421	target->asap_mode = peer->ftm.asap;
 422	target->enable_dyn_ack = IWL_MVM_FTM_INITIATOR_DYNACK;
 423
 424	if (peer->ftm.request_lci)
 425		target->location_req |= IWL_TOF_LOC_LCI;
 426	if (peer->ftm.request_civicloc)
 427		target->location_req |= IWL_TOF_LOC_CIVIC;
 428
 429	target->algo_type = IWL_MVM_FTM_INITIATOR_ALGO;
 430
 431	return 0;
 432}
 433
 434#define FTM_PUT_FLAG(flag)	(target->initiator_ap_flags |= \
 435				 cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
 436
 437static void
 438iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
 439			      struct cfg80211_pmsr_request_peer *peer,
 440			      struct iwl_tof_range_req_ap_entry_v6 *target)
 441{
 442	memcpy(target->bssid, peer->addr, ETH_ALEN);
 443	target->burst_period =
 444		cpu_to_le16(peer->ftm.burst_period);
 445	target->samples_per_burst = peer->ftm.ftms_per_burst;
 446	target->num_of_bursts = peer->ftm.num_bursts_exp;
 447	target->ftmr_max_retries = peer->ftm.ftmr_retries;
 448	target->initiator_ap_flags = cpu_to_le32(0);
 449
 450	if (peer->ftm.asap)
 451		FTM_PUT_FLAG(ASAP);
 452
 453	if (peer->ftm.request_lci)
 454		FTM_PUT_FLAG(LCI_REQUEST);
 455
 456	if (peer->ftm.request_civicloc)
 457		FTM_PUT_FLAG(CIVIC_REQUEST);
 458
 459	if (IWL_MVM_FTM_INITIATOR_DYNACK)
 460		FTM_PUT_FLAG(DYN_ACK);
 461
 462	if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
 463		FTM_PUT_FLAG(ALGO_LR);
 464	else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
 465		FTM_PUT_FLAG(ALGO_FFT);
 466
 467	if (peer->ftm.trigger_based)
 468		FTM_PUT_FLAG(TB);
 469	else if (peer->ftm.non_trigger_based)
 470		FTM_PUT_FLAG(NON_TB);
 471
 472	if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
 473	    peer->ftm.lmr_feedback)
 474		FTM_PUT_FLAG(LMR_FEEDBACK);
 475}
 476
 477static int
 478iwl_mvm_ftm_put_target_v3(struct iwl_mvm *mvm,
 479			  struct cfg80211_pmsr_request_peer *peer,
 480			  struct iwl_tof_range_req_ap_entry_v3 *target)
 481{
 482	int ret;
 483
 484	ret = iwl_mvm_ftm_target_chandef_v1(mvm, peer, &target->channel_num,
 485					    &target->bandwidth,
 486					    &target->ctrl_ch_position);
 487	if (ret)
 488		return ret;
 489
 490	/*
 491	 * Versions 3 and 4 share the fields that are set by
 492	 * iwl_mvm_ftm_put_target_common(), so it can be used for version 3 too.
 493	 */
 494	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
 495
 496	return 0;
 497}
 498
 499static int
 500iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
 501			  struct cfg80211_pmsr_request_peer *peer,
 502			  struct iwl_tof_range_req_ap_entry_v4 *target)
 503{
 504	int ret;
 505
 506	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
 507					    &target->format_bw,
 508					    &target->ctrl_ch_position);
 509	if (ret)
 510		return ret;
 511
 512	iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
 513
 514	return 0;
 515}
 516
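/*
 * Fill a v6 target entry (later versions are handled by casting, as they
 * share these fields). For a target that is the AP we are associated with,
 * also resolve the firmware station id and request PMF protection for
 * trigger-based and non-trigger-based measurements when the AP uses
 * management frame protection.
 */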
 517static int
 518iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 519		       struct cfg80211_pmsr_request_peer *peer,
 520		       struct iwl_tof_range_req_ap_entry_v6 *target)
 521{
 522	int ret;
 523
 524	ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
 525					    &target->format_bw,
 526					    &target->ctrl_ch_position);
 527	if (ret)
 528		return ret;
 529
 530	iwl_mvm_ftm_put_target_common(mvm, peer, target);
 531
 532	if (vif->cfg.assoc) {
 533		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
 534		struct ieee80211_sta *sta;
 535		struct ieee80211_bss_conf *link_conf;
 536		unsigned int link_id;
 537
 538		rcu_read_lock();
 539		for_each_vif_active_link(vif, link_conf, link_id) {
 540			if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
 541				continue;
 542
 543			target->sta_id = mvmvif->link[link_id]->ap_sta_id;
 544			sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
 545			if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
 546				rcu_read_unlock();
 547				return PTR_ERR_OR_ZERO(sta);
 548			}
 549
 550			if (sta->mfp && (peer->ftm.trigger_based ||
 551					 peer->ftm.non_trigger_based))
 552				FTM_PUT_FLAG(PMF);
 553			break;
 554		}
 555		rcu_read_unlock();
 556	} else {
 557		target->sta_id = IWL_MVM_INVALID_STA;
 558	}
 559
 560	/*
 561	 * TODO: Beacon interval is currently unknown, so use the common value
 562	 * of 100 TUs.
 563	 */
 564	target->beacon_interval = cpu_to_le16(100);
 565	return 0;
 566}
 567
 568static int iwl_mvm_ftm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *hcmd)
 569{
 570	u32 status;
 571	int err = iwl_mvm_send_cmd_status(mvm, hcmd, &status);
 572
 573	if (!err && status) {
 574		IWL_ERR(mvm, "FTM range request command failure, status: %u\n",
 575			status);
 576		err = iwl_ftm_range_request_status_to_err(status);
 577	}
 578
 579	return err;
 580}
 581
 582static int iwl_mvm_ftm_start_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 583				struct cfg80211_pmsr_request *req)
 584{
 585	struct iwl_tof_range_req_cmd_v5 cmd_v5;
 586	struct iwl_host_cmd hcmd = {
 587		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 588		.dataflags[0] = IWL_HCMD_DFL_DUP,
 589		.data[0] = &cmd_v5,
 590		.len[0] = sizeof(cmd_v5),
 591	};
 592	u8 i;
 593	int err;
 594
 595	iwl_mvm_ftm_cmd_v5(mvm, vif, &cmd_v5, req);
 596
 597	for (i = 0; i < cmd_v5.num_of_ap; i++) {
 598		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 599
 600		err = iwl_mvm_ftm_put_target_v2(mvm, peer, &cmd_v5.ap[i]);
 601		if (err)
 602			return err;
 603	}
 604
 605	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 606}
 607
 608static int iwl_mvm_ftm_start_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 609				struct cfg80211_pmsr_request *req)
 610{
 611	struct iwl_tof_range_req_cmd_v7 cmd_v7;
 612	struct iwl_host_cmd hcmd = {
 613		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 614		.dataflags[0] = IWL_HCMD_DFL_DUP,
 615		.data[0] = &cmd_v7,
 616		.len[0] = sizeof(cmd_v7),
 617	};
 618	u8 i;
 619	int err;
 620
 621	/*
 622	 * Versions 7 and 8 have the same structure except for the responders
 623	 * list, so iwl_mvm_ftm_cmd_v8() can be used for version 7 too.
 624	 */
 625	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd_v7, req);
 626
 627	for (i = 0; i < cmd_v7.num_of_ap; i++) {
 628		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 629
 630		err = iwl_mvm_ftm_put_target_v3(mvm, peer, &cmd_v7.ap[i]);
 631		if (err)
 632			return err;
 633	}
 634
 635	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 636}
 637
 638static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 639				struct cfg80211_pmsr_request *req)
 640{
 641	struct iwl_tof_range_req_cmd_v8 cmd;
 642	struct iwl_host_cmd hcmd = {
 643		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 644		.dataflags[0] = IWL_HCMD_DFL_DUP,
 645		.data[0] = &cmd,
 646		.len[0] = sizeof(cmd),
 647	};
 648	u8 i;
 649	int err;
 650
 651	iwl_mvm_ftm_cmd_v8(mvm, vif, (void *)&cmd, req);
 652
 653	for (i = 0; i < cmd.num_of_ap; i++) {
 654		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 655
 656		err = iwl_mvm_ftm_put_target_v4(mvm, peer, &cmd.ap[i]);
 657		if (err)
 658			return err;
 659	}
 660
 661	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 662}
 663
 664static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 665				struct cfg80211_pmsr_request *req)
 666{
 667	struct iwl_tof_range_req_cmd_v9 cmd;
 668	struct iwl_host_cmd hcmd = {
 669		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 670		.dataflags[0] = IWL_HCMD_DFL_DUP,
 671		.data[0] = &cmd,
 672		.len[0] = sizeof(cmd),
 673	};
 674	u8 i;
 675	int err;
 676
 677	iwl_mvm_ftm_cmd_common(mvm, vif, &cmd, req);
 678
 679	for (i = 0; i < cmd.num_of_ap; i++) {
 680		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 681		struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
 682
 683		err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
 684		if (err)
 685			return err;
 686	}
 687
 688	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 689}
 690
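/*
 * ieee80211_iter_keys() callback: when iterating the keys of the station
 * that matches the target BSSID, copy its TK and cipher into the target
 * entry used for secured ranging.
 */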
 691static void iter(struct ieee80211_hw *hw,
 692		 struct ieee80211_vif *vif,
 693		 struct ieee80211_sta *sta,
 694		 struct ieee80211_key_conf *key,
 695		 void *data)
 696{
 697	struct iwl_tof_range_req_ap_entry_v6 *target = data;
 698
 699	if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
 700		return;
 701
 702	WARN_ON(!sta->mfp);
 703
 704	if (WARN_ON(key->keylen > sizeof(target->tk)))
 705		return;
 706
 707	memcpy(target->tk, key->key, key->keylen);
 708	target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
 709	WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
 710}
 711
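/*
 * For trigger-based and non-trigger-based (802.11az) targets, look up the
 * PASN entry registered for the target BSSID and program the cipher, HLTK,
 * TK (taken from the installed pairwise key if we are associated to this
 * AP) and packet numbers, then mark the target as secured.
 */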
 712static void
 713iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 714				struct iwl_tof_range_req_ap_entry_v7 *target)
 715{
 716	struct iwl_mvm_ftm_pasn_entry *entry;
 717	u32 flags = le32_to_cpu(target->initiator_ap_flags);
 718
 719	if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
 720		       IWL_INITIATOR_AP_FLAGS_TB)))
 721		return;
 722
 723	lockdep_assert_held(&mvm->mutex);
 724
 725	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
 726		if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
 727			continue;
 728
 729		target->cipher = entry->cipher;
 730
 731		if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
 732			memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
 733		else
 734			memset(target->hltk, 0, sizeof(target->hltk));
 735
 736		if (vif->cfg.assoc &&
 737		    !memcmp(vif->bss_conf.bssid, target->bssid,
 738			    sizeof(target->bssid)))
 739			ieee80211_iter_keys(mvm->hw, vif, iter, target);
 740		else
 741			memcpy(target->tk, entry->tk, sizeof(target->tk));
 742
 743		memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
 744		memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
 745
 746		target->initiator_ap_flags |=
 747			cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
 748		return;
 749	}
 750}
 751
 752static int
 753iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 754			  struct cfg80211_pmsr_request_peer *peer,
 755			  struct iwl_tof_range_req_ap_entry_v7 *target)
 756{
 757	int err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
 758	if (err)
 759		return err;
 760
 761	iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
 762	return err;
 763}
 764
 765static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
 766				 struct ieee80211_vif *vif,
 767				 struct cfg80211_pmsr_request *req)
 768{
 769	struct iwl_tof_range_req_cmd_v11 cmd;
 770	struct iwl_host_cmd hcmd = {
 771		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 772		.dataflags[0] = IWL_HCMD_DFL_DUP,
 773		.data[0] = &cmd,
 774		.len[0] = sizeof(cmd),
 775	};
 776	u8 i;
 777	int err;
 778
 779	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
 780
 781	for (i = 0; i < cmd.num_of_ap; i++) {
 782		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 783		struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
 784
 785		err = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, target);
 786		if (err)
 787			return err;
 788	}
 789
 790	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 791}
 792
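/*
 * Configure the NDP ranging parameters: the maximum number of LTF
 * repetitions, space-time streams and total LTFs for the R2I (responder
 * to initiator) and I2R (initiator to responder) directions.
 */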
 793static void
 794iwl_mvm_ftm_set_ndp_params(struct iwl_mvm *mvm,
 795			   struct iwl_tof_range_req_ap_entry_v8 *target)
 796{
 797	/* Only 2 STS are supported on Tx, so clamp the configured maximum to 1 */
 798	u32 i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
 799		IWL_MVM_FTM_I2R_MAX_STS;
 800
 801	target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
 802		(IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS);
 803	target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
 804		(i2r_max_sts << IWL_LOCATION_MAX_STS_POS);
 805	target->r2i_max_total_ltf = IWL_MVM_FTM_R2I_MAX_TOTAL_LTF;
 806	target->i2r_max_total_ltf = IWL_MVM_FTM_I2R_MAX_TOTAL_LTF;
 807}
 808
 809static int
 810iwl_mvm_ftm_put_target_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 811			  struct cfg80211_pmsr_request_peer *peer,
 812			  struct iwl_tof_range_req_ap_entry_v8 *target)
 813{
 814	u32 flags;
 815	int ret = iwl_mvm_ftm_put_target_v7(mvm, vif, peer, (void *)target);
 816
 817	if (ret)
 818		return ret;
 819
 820	iwl_mvm_ftm_set_ndp_params(mvm, target);
 821
 822	/* If secure LTF is turned off, clear the SECURED flag and fall
 823	 * back to PMF protection only for this target.
 824	 */
 825	flags = le32_to_cpu(target->initiator_ap_flags);
 826	if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
 827		if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
 828			flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
 829
 830		flags |= IWL_INITIATOR_AP_FLAGS_PMF;
 831		target->initiator_ap_flags = cpu_to_le32(flags);
 832	}
 833
 834	return 0;
 835}
 836
 837static int iwl_mvm_ftm_start_v12(struct iwl_mvm *mvm,
 838				 struct ieee80211_vif *vif,
 839				 struct cfg80211_pmsr_request *req)
 840{
 841	struct iwl_tof_range_req_cmd_v12 cmd;
 842	struct iwl_host_cmd hcmd = {
 843		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 844		.dataflags[0] = IWL_HCMD_DFL_DUP,
 845		.data[0] = &cmd,
 846		.len[0] = sizeof(cmd),
 847	};
 848	u8 i;
 849	int err;
 850
 851	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
 852
 853	for (i = 0; i < cmd.num_of_ap; i++) {
 854		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 855		struct iwl_tof_range_req_ap_entry_v8 *target = &cmd.ap[i];
 856
 857		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, target);
 858		if (err)
 859			return err;
 860	}
 861
 862	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 863}
 864
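/*
 * Version 13 uses the v9 AP entry, which additionally carries the BSS
 * color for (non-)trigger-based targets, the min/max time between
 * measurements for non-trigger-based targets (the burst period field is
 * reused as the maximum) and the band.
 */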
 865static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
 866				 struct ieee80211_vif *vif,
 867				 struct cfg80211_pmsr_request *req)
 868{
 869	struct iwl_tof_range_req_cmd_v13 cmd;
 870	struct iwl_host_cmd hcmd = {
 871		.id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 872		.dataflags[0] = IWL_HCMD_DFL_DUP,
 873		.data[0] = &cmd,
 874		.len[0] = sizeof(cmd),
 875	};
 876	u8 i;
 877	int err;
 878
 879	iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
 880
 881	for (i = 0; i < cmd.num_of_ap; i++) {
 882		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 883		struct iwl_tof_range_req_ap_entry_v9 *target = &cmd.ap[i];
 884
 885		err = iwl_mvm_ftm_put_target_v8(mvm, vif, peer, (void *)target);
 886		if (err)
 887			return err;
 888
 889		if (peer->ftm.trigger_based || peer->ftm.non_trigger_based)
 890			target->bss_color = peer->ftm.bss_color;
 891
 892		if (peer->ftm.non_trigger_based) {
 893			target->min_time_between_msr =
 894				cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
 895			target->burst_period =
 896				cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
 897		} else {
 898			target->min_time_between_msr = cpu_to_le16(0);
 899		}
 900
 901		target->band =
 902			iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
 903	}
 904
 905	return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
 906}
 907
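/*
 * Start an FTM session: pick the range request command version advertised
 * by the firmware and build and send the matching command layout. Only one
 * measurement request may be in flight at a time.
 */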
 908int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 909		      struct cfg80211_pmsr_request *req)
 910{
 911	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
 912				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
 913	int err;
 914
 915	lockdep_assert_held(&mvm->mutex);
 916
 917	if (mvm->ftm_initiator.req)
 918		return -EBUSY;
 919
 920	if (new_api) {
 921		u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
 922						   WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
 923						   IWL_FW_CMD_VER_UNKNOWN);
 924
 925		switch (cmd_ver) {
 926		case 13:
 927			err = iwl_mvm_ftm_start_v13(mvm, vif, req);
 928			break;
 929		case 12:
 930			err = iwl_mvm_ftm_start_v12(mvm, vif, req);
 931			break;
 932		case 11:
 933			err = iwl_mvm_ftm_start_v11(mvm, vif, req);
 934			break;
 935		case 9:
 936		case 10:
 937			err = iwl_mvm_ftm_start_v9(mvm, vif, req);
 938			break;
 939		case 8:
 940			err = iwl_mvm_ftm_start_v8(mvm, vif, req);
 941			break;
 942		default:
 943			err = iwl_mvm_ftm_start_v7(mvm, vif, req);
 944			break;
 945		}
 946	} else {
 947		err = iwl_mvm_ftm_start_v5(mvm, vif, req);
 948	}
 949
 950	if (!err) {
 951		mvm->ftm_initiator.req = req;
 952		mvm->ftm_initiator.req_wdev = ieee80211_vif_to_wdev(vif);
 953	}
 954
 955	return err;
 956}
 957
 958void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req)
 959{
 960	struct iwl_tof_range_abort_cmd cmd = {
 961		.request_id = req->cookie,
 962	};
 963
 964	lockdep_assert_held(&mvm->mutex);
 965
 966	if (req != mvm->ftm_initiator.req)
 967		return;
 968
 969	iwl_mvm_ftm_reset(mvm);
 970
 971	if (iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(LOCATION_GROUP, TOF_RANGE_ABORT_CMD),
 972				 0, sizeof(cmd), &cmd))
 973		IWL_ERR(mvm, "failed to abort FTM process\n");
 974}
 975
 976static int iwl_mvm_ftm_find_peer(struct cfg80211_pmsr_request *req,
 977				 const u8 *addr)
 978{
 979	int i;
 980
 981	for (i = 0; i < req->n_peers; i++) {
 982		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
 983
 984		if (ether_addr_equal_unaligned(peer->addr, addr))
 985			return i;
 986	}
 987
 988	return -ENOENT;
 989}
 990
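/*
 * Convert a firmware GP2 timestamp (the firmware's free-running 32-bit
 * microsecond clock) into host CLOCK_BOOTTIME nanoseconds: sample both
 * clocks now, compute how many microseconds ago the firmware timestamp
 * was taken (handling 32-bit wrap-around) and subtract that from the
 * host time.
 */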
 991static u64 iwl_mvm_ftm_get_host_time(struct iwl_mvm *mvm, __le32 fw_gp2_ts)
 992{
 993	u32 gp2_ts = le32_to_cpu(fw_gp2_ts);
 994	u32 curr_gp2, diff;
 995	u64 now_from_boot_ns;
 996
 997	iwl_mvm_get_sync_time(mvm, CLOCK_BOOTTIME, &curr_gp2,
 998			      &now_from_boot_ns, NULL);
 999
1000	if (curr_gp2 >= gp2_ts)
1001		diff = curr_gp2 - gp2_ts;
1002	else
1003		diff = curr_gp2 + (U32_MAX - gp2_ts + 1);
1004
1005	return now_from_boot_ns - (u64)diff * 1000;
1006}
1007
1008static void iwl_mvm_ftm_get_lci_civic(struct iwl_mvm *mvm,
1009				      struct cfg80211_pmsr_result *res)
1010{
1011	struct iwl_mvm_loc_entry *entry;
1012
1013	list_for_each_entry(entry, &mvm->ftm_initiator.loc_list, list) {
1014		if (!ether_addr_equal_unaligned(res->addr, entry->addr))
1015			continue;
1016
1017		if (entry->lci_len) {
1018			res->ftm.lci_len = entry->lci_len;
1019			res->ftm.lci = entry->buf;
1020		}
1021
1022		if (entry->civic_len) {
1023			res->ftm.civicloc_len = entry->civic_len;
1024			res->ftm.civicloc = entry->buf + entry->lci_len;
1025		}
1026
1027		/* we found the entry we needed */
1028		break;
1029	}
1030}
1031
1032static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
1033					u8 num_of_aps)
1034{
1035	lockdep_assert_held(&mvm->mutex);
1036
1037	if (request_id != (u8)mvm->ftm_initiator.req->cookie) {
1038		IWL_ERR(mvm, "Request ID mismatch, got %u, active %u\n",
1039			request_id, (u8)mvm->ftm_initiator.req->cookie);
1040		return -EINVAL;
1041	}
1042
1043	if (num_of_aps > mvm->ftm_initiator.req->n_peers) {
1044		IWL_ERR(mvm, "FTM range response invalid\n");
1045		return -EINVAL;
1046	}
1047
1048	return 0;
1049}
1050
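/*
 * Optionally smooth the reported RTT with an exponential moving average:
 *
 *	rtt_avg = (alpha * rtt + (100 - alpha) * prev_rtt_avg) / 100
 *
 * The tracked average is kept per responder and reset when it is older
 * than IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC. The smoothed value is
 * reported to userspace only when the new sample deviates from the
 * average by more than the undershoot/overshoot thresholds.
 */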
1051static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
1052				      struct cfg80211_pmsr_result *res)
1053{
1054	struct iwl_mvm_smooth_entry *resp = NULL, *iter;
1055	s64 rtt_avg, rtt = res->ftm.rtt_avg;
1056	u32 undershoot, overshoot;
1057	u8 alpha;
1058
1059	if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
1060		return;
1061
1062	WARN_ON(rtt < 0);
1063
1064	if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
1065		IWL_DEBUG_INFO(mvm,
1066		       "%pM: ignore failed measurement. Status=%u\n",
1067			       res->addr, res->status);
1068		return;
1069	}
1070
1071	list_for_each_entry(iter, &mvm->ftm_initiator.smooth.resp, list) {
1072		if (!memcmp(res->addr, iter->addr, ETH_ALEN)) {
1073			resp = iter;
1074			break;
1075		}
1076	}
1077
1078	if (!resp) {
1079		resp = kzalloc(sizeof(*resp), GFP_KERNEL);
1080		if (!resp)
1081			return;
1082
1083		memcpy(resp->addr, res->addr, ETH_ALEN);
1084		list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
1085
1086		resp->rtt_avg = rtt;
1087
1088		IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
1089			       resp->addr, resp->rtt_avg);
1090		goto update_time;
1091	}
1092
1093	if (res->host_time - resp->host_time >
1094	    IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
1095		resp->rtt_avg = rtt;
1096
1097		IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
1098			       resp->addr, resp->rtt_avg);
1099		goto update_time;
1100	}
1101
1102	/* Smooth the results based on the tracked RTT average */
1103	undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
1104	overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
1105	alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
1106
1107	rtt_avg = div_s64(alpha * rtt + (100 - alpha) * resp->rtt_avg, 100);
1108
1109	IWL_DEBUG_INFO(mvm,
1110		       "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
1111		       resp->addr, resp->rtt_avg, rtt_avg, rtt);
1112
1113	/*
1114	 * update the responder's average RTT results regardless of
1115	 * the under/over shoot logic below
1116	 */
1117	resp->rtt_avg = rtt_avg;
1118
1119	/* smooth the results */
1120	if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
1121		res->ftm.rtt_avg = rtt_avg;
1122
1123		IWL_DEBUG_INFO(mvm,
1124			       "undershoot: val=%lld\n",
1125			       (rtt_avg - rtt));
1126	} else if (rtt_avg < rtt && (rtt - rtt_avg) >
1127		   overshoot) {
1128		res->ftm.rtt_avg = rtt_avg;
1129		IWL_DEBUG_INFO(mvm,
1130			       "overshoot: val=%lld\n",
1131			       (rtt - rtt_avg));
1132	}
1133
1134update_time:
1135	resp->host_time = res->host_time;
1136}
1137
1138static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
1139				     struct cfg80211_pmsr_result *res)
1140{
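	/*
	 * rtt_avg is in picoseconds (the unit cfg80211 expects); multiplying
	 * by c/2 (~0.015 cm/ps) gives the one-way distance, hence the
	 * "* 100 / 6666" conversion to centimeters below.
	 */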
1141	s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
1142
1143	IWL_DEBUG_INFO(mvm, "entry %d\n", index);
1144	IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
1145	IWL_DEBUG_INFO(mvm, "\tBSSID: %pM\n", res->addr);
1146	IWL_DEBUG_INFO(mvm, "\thost time: %llu\n", res->host_time);
1147	IWL_DEBUG_INFO(mvm, "\tburst index: %d\n", res->ftm.burst_index);
1148	IWL_DEBUG_INFO(mvm, "\tsuccess num: %u\n", res->ftm.num_ftmr_successes);
1149	IWL_DEBUG_INFO(mvm, "\trssi: %d\n", res->ftm.rssi_avg);
1150	IWL_DEBUG_INFO(mvm, "\trssi spread: %d\n", res->ftm.rssi_spread);
1151	IWL_DEBUG_INFO(mvm, "\trtt: %lld\n", res->ftm.rtt_avg);
1152	IWL_DEBUG_INFO(mvm, "\trtt var: %llu\n", res->ftm.rtt_variance);
1153	IWL_DEBUG_INFO(mvm, "\trtt spread: %llu\n", res->ftm.rtt_spread);
1154	IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
1155}
1156
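/*
 * Store the packet numbers reported by the firmware for this responder in
 * its PASN entry, so that the next secured ranging request to it can
 * continue from the same PN values.
 */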
1157static void
1158iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
1159			   struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
1160{
1161	struct iwl_mvm_ftm_pasn_entry *entry;
1162
1163	lockdep_assert_held(&mvm->mutex);
1164
1165	list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
1166		if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
1167			continue;
1168
1169		memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
1170		memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
1171		return;
1172	}
1173}
1174
1175static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
1176{
1177	if (!fw_has_api(&mvm->fw->ucode_capa,
1178			IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ))
1179		return 5;
1180
1181	/* Starting from version 8, the FW advertises the version */
1182	if (mvm->cmd_ver.range_resp >= 8)
1183		return mvm->cmd_ver.range_resp;
1184	else if (fw_has_api(&mvm->fw->ucode_capa,
1185			    IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1186		return 7;
1187
1188	/* The first version of the new range request API */
1189	return 6;
1190}
1191
1192static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
1193{
1194	switch (ver) {
1195	case 9:
1196	case 8:
1197		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
1198	case 7:
1199		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v7);
1200	case 6:
1201		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v6);
1202	case 5:
1203		return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v5);
1204	default:
1205		WARN_ONCE(1, "FTM: unsupported range response version %u", ver);
1206		return false;
1207	}
1208}
1209
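/*
 * Handle the range response notification: validate the size and request id,
 * translate each firmware AP entry (the layout depends on the notification
 * version) into a cfg80211_pmsr_result, attach any cached LCI/civic data,
 * apply RTT smoothing and report it. When the firmware marks the report as
 * the last in the batch, complete the measurement request.
 */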
1210void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1211{
1212	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1213	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
1214	struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
1215	struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
1216	struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
1217	struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
1218	int i;
1219	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
1220				  IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
1221	u8 num_of_aps, last_in_batch;
1222	u8 notif_ver = iwl_mvm_ftm_get_range_resp_ver(mvm);
1223
1224	lockdep_assert_held(&mvm->mutex);
1225
1226	if (!mvm->ftm_initiator.req)
1227		return;
1229
1230	if (unlikely(!iwl_mvm_ftm_resp_size_validation(notif_ver, pkt_len)))
1231		return;
1232
1233	if (new_api) {
1234		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
1235						 fw_resp_v8->num_of_aps))
1236			return;
1237
1238		num_of_aps = fw_resp_v8->num_of_aps;
1239		last_in_batch = fw_resp_v8->last_report;
1240	} else {
1241		if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
1242						 fw_resp_v5->num_of_aps))
1243			return;
1244
1245		num_of_aps = fw_resp_v5->num_of_aps;
1246		last_in_batch = fw_resp_v5->last_in_batch;
1247	}
1248
1249	IWL_DEBUG_INFO(mvm, "Range response received\n");
1250	IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
1251		       mvm->ftm_initiator.req->cookie, num_of_aps);
1252
1253	for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
1254		struct cfg80211_pmsr_result result = {};
1255		struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
1256		int peer_idx;
1257
1258		if (new_api) {
1259			if (notif_ver >= 8) {
1260				fw_ap = &fw_resp_v8->ap[i];
1261				iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
1262			} else if (notif_ver == 7) {
1263				fw_ap = (void *)&fw_resp_v7->ap[i];
1264			} else {
1265				fw_ap = (void *)&fw_resp_v6->ap[i];
1266			}
1267
1268			result.final = fw_ap->last_burst;
1269			result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
1270			result.ap_tsf_valid = 1;
1271		} else {
1272			/* the first part is the same for old and new APIs */
1273			fw_ap = (void *)&fw_resp_v5->ap[i];
1274			/*
1275			 * FIXME: the firmware needs to report this, we don't
1276			 * even know the number of bursts the responder picked
1277			 * (if we asked it to)
1278			 */
1279			result.final = 0;
1280		}
1281
1282		peer_idx = iwl_mvm_ftm_find_peer(mvm->ftm_initiator.req,
1283						 fw_ap->bssid);
1284		if (peer_idx < 0) {
1285			IWL_WARN(mvm,
1286				 "Unknown address (%pM, target #%d) in FTM response\n",
1287				 fw_ap->bssid, i);
1288			continue;
1289		}
1290
1291		switch (fw_ap->measure_status) {
1292		case IWL_TOF_ENTRY_SUCCESS:
1293			result.status = NL80211_PMSR_STATUS_SUCCESS;
1294			break;
1295		case IWL_TOF_ENTRY_TIMING_MEASURE_TIMEOUT:
1296			result.status = NL80211_PMSR_STATUS_TIMEOUT;
1297			break;
1298		case IWL_TOF_ENTRY_NO_RESPONSE:
1299			result.status = NL80211_PMSR_STATUS_FAILURE;
1300			result.ftm.failure_reason =
1301				NL80211_PMSR_FTM_FAILURE_NO_RESPONSE;
1302			break;
1303		case IWL_TOF_ENTRY_REQUEST_REJECTED:
1304			result.status = NL80211_PMSR_STATUS_FAILURE;
1305			result.ftm.failure_reason =
1306				NL80211_PMSR_FTM_FAILURE_PEER_BUSY;
1307			result.ftm.busy_retry_time = fw_ap->refusal_period;
1308			break;
1309		default:
1310			result.status = NL80211_PMSR_STATUS_FAILURE;
1311			result.ftm.failure_reason =
1312				NL80211_PMSR_FTM_FAILURE_UNSPECIFIED;
1313			break;
1314		}
1315		memcpy(result.addr, fw_ap->bssid, ETH_ALEN);
1316		result.host_time = iwl_mvm_ftm_get_host_time(mvm,
1317							     fw_ap->timestamp);
1318		result.type = NL80211_PMSR_TYPE_FTM;
1319		result.ftm.burst_index = mvm->ftm_initiator.responses[peer_idx];
1320		mvm->ftm_initiator.responses[peer_idx]++;
1321		result.ftm.rssi_avg = fw_ap->rssi;
1322		result.ftm.rssi_avg_valid = 1;
1323		result.ftm.rssi_spread = fw_ap->rssi_spread;
1324		result.ftm.rssi_spread_valid = 1;
1325		result.ftm.rtt_avg = (s32)le32_to_cpu(fw_ap->rtt);
1326		result.ftm.rtt_avg_valid = 1;
1327		result.ftm.rtt_variance = le32_to_cpu(fw_ap->rtt_variance);
1328		result.ftm.rtt_variance_valid = 1;
1329		result.ftm.rtt_spread = le32_to_cpu(fw_ap->rtt_spread);
1330		result.ftm.rtt_spread_valid = 1;
1331
1332		iwl_mvm_ftm_get_lci_civic(mvm, &result);
1333
1334		iwl_mvm_ftm_rtt_smoothing(mvm, &result);
1335
1336		cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
1337				     mvm->ftm_initiator.req,
1338				     &result, GFP_KERNEL);
1339
1340		if (fw_has_api(&mvm->fw->ucode_capa,
1341			       IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
1342			IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
1343				       fw_ap->rttConfidence);
1344
1345		iwl_mvm_debug_range_resp(mvm, i, &result);
1346	}
1347
1348	if (last_in_batch) {
1349		cfg80211_pmsr_complete(mvm->ftm_initiator.req_wdev,
1350				       mvm->ftm_initiator.req,
1351				       GFP_KERNEL);
1352		iwl_mvm_ftm_reset(mvm);
1353	}
1354}
1355
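/*
 * Handle the LCI/civic location notification: the firmware forwards the
 * responder's FTM action frame, from which the LCI and location civic
 * measurement report elements are extracted and cached so they can be
 * attached to the matching range results later.
 */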
1356void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
1357{
1358	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1359	const struct ieee80211_mgmt *mgmt = (void *)pkt->data;
1360	size_t len = iwl_rx_packet_payload_len(pkt);
1361	struct iwl_mvm_loc_entry *entry;
1362	const u8 *ies, *lci, *civic, *msr_ie;
1363	size_t ies_len, lci_len = 0, civic_len = 0;
1364	size_t baselen = IEEE80211_MIN_ACTION_SIZE +
1365			 sizeof(mgmt->u.action.u.ftm);
1366	static const u8 rprt_type_lci = IEEE80211_SPCT_MSR_RPRT_TYPE_LCI;
1367	static const u8 rprt_type_civic = IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC;
1368
1369	if (len <= baselen)
1370		return;
1371
1372	lockdep_assert_held(&mvm->mutex);
1373
1374	ies = mgmt->u.action.u.ftm.variable;
1375	ies_len = len - baselen;
1376
1377	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1378					&rprt_type_lci, 1, 4);
1379	if (msr_ie) {
1380		lci = msr_ie + 2;
1381		lci_len = msr_ie[1];
1382	}
1383
1384	msr_ie = cfg80211_find_ie_match(WLAN_EID_MEASURE_REPORT, ies, ies_len,
1385					&rprt_type_civic, 1, 4);
1386	if (msr_ie) {
1387		civic = msr_ie + 2;
1388		civic_len = msr_ie[1];
1389	}
1390
1391	entry = kmalloc(sizeof(*entry) + lci_len + civic_len, GFP_KERNEL);
1392	if (!entry)
1393		return;
1394
1395	memcpy(entry->addr, mgmt->bssid, ETH_ALEN);
1396
1397	entry->lci_len = lci_len;
1398	if (lci_len)
1399		memcpy(entry->buf, lci, lci_len);
1400
1401	entry->civic_len = civic_len;
1402	if (civic_len)
1403		memcpy(entry->buf + lci_len, civic, civic_len);
1404
1405	list_add_tail(&entry->list, &mvm->ftm_initiator.loc_list);
1406}