v4.17
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
   9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of version 2 of the GNU General Public License as
  14 * published by the Free Software Foundation.
  15 *
  16 * This program is distributed in the hope that it will be useful, but
  17 * WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  19 * General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this program; if not, write to the Free Software
  23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  24 * USA
  25 *
  26 * The full GNU General Public License is included in this distribution
  27 * in the file called COPYING.
  28 *
  29 * Contact Information:
  30 *  Intel Linux Wireless <linuxwifi@intel.com>
  31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  32 *
  33 * BSD LICENSE
  34 *
  35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
  38 * All rights reserved.
  39 *
  40 * Redistribution and use in source and binary forms, with or without
  41 * modification, are permitted provided that the following conditions
  42 * are met:
  43 *
  44 *  * Redistributions of source code must retain the above copyright
  45 *    notice, this list of conditions and the following disclaimer.
  46 *  * Redistributions in binary form must reproduce the above copyright
  47 *    notice, this list of conditions and the following disclaimer in
  48 *    the documentation and/or other materials provided with the
  49 *    distribution.
  50 *  * Neither the name Intel Corporation nor the names of its
  51 *    contributors may be used to endorse or promote products derived
  52 *    from this software without specific prior written permission.
  53 *
  54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  65 *
  66 *****************************************************************************/
  67#include <net/mac80211.h>
  68#include <linux/netdevice.h>
  69
  70#include "iwl-trans.h"
  71#include "iwl-op-mode.h"
  72#include "fw/img.h"
  73#include "iwl-debug.h"
  74#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
  75#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
  76#include "iwl-prph.h"
  77#include "fw/acpi.h"
  78
  79#include "mvm.h"
  80#include "fw/dbg.h"
  81#include "iwl-phy-db.h"
  82
  83#define MVM_UCODE_ALIVE_TIMEOUT	HZ
  84#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)
  85
  86#define UCODE_VALID_OK	cpu_to_le32(0x1)
  87
  88struct iwl_mvm_alive_data {
  89	bool valid;
  90	u32 scd_base_addr;
  91};
  92
  93static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
  94{
  95	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
  96		.valid = cpu_to_le32(valid_tx_ant),
  97	};
  98
  99	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
 100	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
 101				    sizeof(tx_ant_cmd), &tx_ant_cmd);
 102}
 103
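/*
 * Configure receive-side scaling (RSS): hash TCP, UDP and plain
 * payload flows for both IPv4 and IPv6 and spread them over RX queues
 * 1..N-1 through the indirection table; queue 0 is left out because it
 * is the fallback queue. With e.g. four RX queues the table cycles
 * 1, 2, 3, 1, 2, 3, ... and a random secret key seeds the hash.
 */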
 104static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
 105{
 106	int i;
 107	struct iwl_rss_config_cmd cmd = {
 108		.flags = cpu_to_le32(IWL_RSS_ENABLE),
 109		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
 110			     IWL_RSS_HASH_TYPE_IPV4_UDP |
 111			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
 112			     IWL_RSS_HASH_TYPE_IPV6_TCP |
 113			     IWL_RSS_HASH_TYPE_IPV6_UDP |
 114			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
 115	};
 116
 117	if (mvm->trans->num_rx_queues == 1)
 118		return 0;
 119
 120	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
 121	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
 122		cmd.indirection_table[i] =
 123			1 + (i % (mvm->trans->num_rx_queues - 1));
 124	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 125
 126	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 127}
 128
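/*
 * Switch the firmware to dynamic queue allocation (DQA) mode and tell
 * it which queue serves as the command queue.
 */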
 129static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
 130{
 131	struct iwl_dqa_enable_cmd dqa_cmd = {
 132		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
 133	};
 134	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
 135	int ret;
 136
 137	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
 138	if (ret)
 139		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
 140	else
 141		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
 142
 143	return ret;
 144}
 145
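/*
 * Handle an MFUART assert-dump notification: log the assert id once,
 * on the first chunk (index_num == 0), then dump the payload dword by
 * dword; index_num * n_words + i yields the absolute dword offset.
 */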
 146void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
 147				   struct iwl_rx_cmd_buffer *rxb)
 148{
 149	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 150	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
 151	__le32 *dump_data = mfu_dump_notif->data;
 152	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
 153	int i;
 154
 155	if (mfu_dump_notif->index_num == 0)
 156		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
 157			 le32_to_cpu(mfu_dump_notif->assert_id));
 158
 159	for (i = 0; i < n_words; i++)
 160		IWL_DEBUG_INFO(mvm,
 161			       "MFUART assert dump, dword %u: 0x%08x\n",
 162			       le16_to_cpu(mfu_dump_notif->index_num) *
 163			       n_words + i,
 164			       le32_to_cpu(dump_data[i]));
 165}
 166
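/*
 * Notification-wait callback for the ALIVE message. The payload size
 * tells the layouts apart: the full response carries two LMACs (CDB
 * firmware), the v3 response a single one. Either way, record the
 * LMAC/UMAC error and log event table pointers and the scheduler (SCD)
 * base address, and report success only for IWL_ALIVE_STATUS_OK.
 */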
 167static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 168			 struct iwl_rx_packet *pkt, void *data)
 169{
 170	struct iwl_mvm *mvm =
 171		container_of(notif_wait, struct iwl_mvm, notif_wait);
 172	struct iwl_mvm_alive_data *alive_data = data;
 173	struct mvm_alive_resp_v3 *palive3;
 174	struct mvm_alive_resp *palive;
 175	struct iwl_umac_alive *umac;
 176	struct iwl_lmac_alive *lmac1;
 177	struct iwl_lmac_alive *lmac2 = NULL;
 178	u16 status;
 179	u32 umac_error_event_table;
 180
 181	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
 182		palive = (void *)pkt->data;
 183		umac = &palive->umac_data;
 184		lmac1 = &palive->lmac_data[0];
 185		lmac2 = &palive->lmac_data[1];
 186		status = le16_to_cpu(palive->status);
 187	} else {
 188		palive3 = (void *)pkt->data;
 189		umac = &palive3->umac_data;
 190		lmac1 = &palive3->lmac_data;
 191		status = le16_to_cpu(palive3->status);
 192	}
 193
 194	mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
 195	if (lmac2)
 196		mvm->error_event_table[1] =
 197			le32_to_cpu(lmac2->error_event_table_ptr);
 198	mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
 199
 200	umac_error_event_table = le32_to_cpu(umac->error_info_addr);
 201
 202	if (!umac_error_event_table) {
 203		mvm->support_umac_log = false;
 204	} else if (umac_error_event_table >=
 205		   mvm->trans->cfg->min_umac_error_event_table) {
 206		mvm->support_umac_log = true;
 207		mvm->umac_error_event_table = umac_error_event_table;
 208	} else {
 209		IWL_ERR(mvm,
 210			"Not valid error log pointer 0x%08X for %s uCode\n",
  211			umac_error_event_table,
 212			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
 213			"Init" : "RT");
 214		mvm->support_umac_log = false;
 215	}
 216
 217	alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
 218	alive_data->valid = status == IWL_ALIVE_STATUS_OK;
 219
 220	IWL_DEBUG_FW(mvm,
 221		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
 222		     status, lmac1->ver_type, lmac1->ver_subtype);
 223
 224	if (lmac2)
 225		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
 226
 227	IWL_DEBUG_FW(mvm,
 228		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
 229		     le32_to_cpu(umac->umac_major),
 230		     le32_to_cpu(umac->umac_minor));
 231
 232	return true;
 233}
 234
 235static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
 236				   struct iwl_rx_packet *pkt, void *data)
 237{
 238	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
 239
 240	return true;
 241}
 242
 243static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
 244				  struct iwl_rx_packet *pkt, void *data)
 245{
 246	struct iwl_phy_db *phy_db = data;
 247
 248	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
 249		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
 250		return true;
 251	}
 252
 253	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
 254
 255	return false;
 256}
 257
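/*
 * Start the given ucode image and block until the ALIVE notification
 * arrives or MVM_UCODE_ALIVE_TIMEOUT expires. When debug data should
 * be captured from ALIVE and no unified usniffer image is advertised,
 * the usniffer image replaces the regular one. On any failure the
 * previously current image type is restored.
 */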
 258static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 259					 enum iwl_ucode_type ucode_type)
 260{
 261	struct iwl_notification_wait alive_wait;
 262	struct iwl_mvm_alive_data alive_data;
 263	const struct fw_img *fw;
 264	int ret, i;
 265	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
 266	static const u16 alive_cmd[] = { MVM_ALIVE };
 267
 268	if (ucode_type == IWL_UCODE_REGULAR &&
 269	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
 270	    !(fw_has_capa(&mvm->fw->ucode_capa,
 271			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
 272		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
 273	else
 274		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
 275	if (WARN_ON(!fw))
 276		return -EINVAL;
 277	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
 278	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 279
 280	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
 281				   alive_cmd, ARRAY_SIZE(alive_cmd),
 282				   iwl_alive_fn, &alive_data);
 283
 284	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
 285	if (ret) {
 286		iwl_fw_set_current_image(&mvm->fwrt, old_type);
 287		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
 288		return ret;
 289	}
 290
 291	/*
 292	 * Some things may run in the background now, but we
 293	 * just wait for the ALIVE notification here.
 294	 */
 295	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
 296				    MVM_UCODE_ALIVE_TIMEOUT);
 297	if (ret) {
 298		struct iwl_trans *trans = mvm->trans;
 299
 300		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
 301			IWL_ERR(mvm,
 302				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
 303				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
 304				iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
 305		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
 306			IWL_ERR(mvm,
 307				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
 308				iwl_read_prph(trans, SB_CPU_1_STATUS),
 309				iwl_read_prph(trans, SB_CPU_2_STATUS));
 310		iwl_fw_set_current_image(&mvm->fwrt, old_type);
 311		return ret;
 312	}
 313
 314	if (!alive_data.valid) {
 315		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
 316		iwl_fw_set_current_image(&mvm->fwrt, old_type);
 317		return -EIO;
 318	}
 319
 320	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 321
 322	/*
 323	 * Note: all the queues are enabled as part of the interface
 324	 * initialization, but in firmware restart scenarios they
 325	 * could be stopped, so wake them up. In firmware restart,
 326	 * mac80211 will have the queues stopped as well until the
 327	 * reconfiguration completes. During normal startup, they
 328	 * will be empty.
 329	 */
 330
 331	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
 332	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
 333
 334	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
 335		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
 336
 337	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
 338
 339	return 0;
 340}
 341
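/*
 * Bring-up path for unified firmware images: start the regular image,
 * announce the NVM access phase with INIT_EXTENDED_CFG_CMD, load
 * and/or parse the NVM as configured, send NVM_ACCESS_COMPLETE and
 * finally wait for INIT_COMPLETE_NOTIF.
 */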
 342static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 343{
 344	struct iwl_notification_wait init_wait;
 345	struct iwl_nvm_access_complete_cmd nvm_complete = {};
 346	struct iwl_init_extended_cfg_cmd init_cfg = {
 347		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
 348	};
 349	static const u16 init_complete[] = {
 350		INIT_COMPLETE_NOTIF,
 351	};
 352	int ret;
 353
 354	lockdep_assert_held(&mvm->mutex);
 355
 356	iwl_init_notification_wait(&mvm->notif_wait,
 357				   &init_wait,
 358				   init_complete,
 359				   ARRAY_SIZE(init_complete),
 360				   iwl_wait_init_complete,
 361				   NULL);
 362
 363	/* Will also start the device */
 364	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
 365	if (ret) {
 366		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
 367		goto error;
 368	}
 369
 370	/* Send init config command to mark that we are sending NVM access
 371	 * commands
 372	 */
 373	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
 374						INIT_EXTENDED_CFG_CMD), 0,
 375				   sizeof(init_cfg), &init_cfg);
 376	if (ret) {
 377		IWL_ERR(mvm, "Failed to run init config command: %d\n",
 378			ret);
 379		goto error;
 380	}
 381
 382	/* Load NVM to NIC if needed */
 383	if (mvm->nvm_file_name) {
 384		iwl_mvm_read_external_nvm(mvm);
 385		iwl_mvm_load_nvm_to_nic(mvm);
 386	}
 387
 388	if (IWL_MVM_PARSE_NVM && read_nvm) {
 389		ret = iwl_nvm_init(mvm);
 390		if (ret) {
 391			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
 392			goto error;
 393		}
 394	}
 395
 396	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
 397						NVM_ACCESS_COMPLETE), 0,
 398				   sizeof(nvm_complete), &nvm_complete);
 399	if (ret) {
 400		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
 401			ret);
 402		goto error;
 403	}
 404
 405	/* We wait for the INIT complete notification */
 406	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
 407				    MVM_UCODE_ALIVE_TIMEOUT);
 408	if (ret)
 409		return ret;
 410
 411	/* Read the NVM only at driver load time, no need to do this twice */
 412	if (!IWL_MVM_PARSE_NVM && read_nvm) {
 413		mvm->nvm_data = iwl_fw_get_nvm(&mvm->fwrt);
 414		if (IS_ERR(mvm->nvm_data)) {
 415			ret = PTR_ERR(mvm->nvm_data);
 416			mvm->nvm_data = NULL;
 417			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
 418			return ret;
 419		}
 420	}
 421
 422	return 0;
 423
 424error:
 425	iwl_remove_notification(&mvm->notif_wait, &init_wait);
 426	return ret;
 427}
 428
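/*
 * Send the PHY configuration to the firmware: the phy_cfg mask plus
 * any extra flags from the device config, together with the
 * calibration triggers that match the currently running image.
 */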
 429static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 430{
 431	struct iwl_phy_cfg_cmd phy_cfg_cmd;
 432	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
 433
 434	/* Set parameters */
 435	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
 436
  437	/* set extra PHY configuration flags from the device's cfg */
 438	phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
 439
 440	phy_cfg_cmd.calib_control.event_trigger =
 441		mvm->fw->default_calib[ucode_type].event_trigger;
 442	phy_cfg_cmd.calib_control.flow_trigger =
 443		mvm->fw->default_calib[ucode_type].flow_trigger;
 444
 445	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
 446		       phy_cfg_cmd.phy_cfg);
 447
 448	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
 449				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 450}
 451
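/*
 * Run the INIT ucode on non-unified firmware: load the image, read
 * the NVM, then kick off calibrations and wait for the PHY DB /
 * INIT_COMPLETE notifications. If RF-kill is asserted, the PHY steps
 * are skipped and the sequence is completed later, when the radio is
 * switched back on.
 */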
 452int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 453{
 454	struct iwl_notification_wait calib_wait;
 455	static const u16 init_complete[] = {
 456		INIT_COMPLETE_NOTIF,
 457		CALIB_RES_NOTIF_PHY_DB
 458	};
 459	int ret;
 460
 461	if (iwl_mvm_has_unified_ucode(mvm))
 462		return iwl_run_unified_mvm_ucode(mvm, true);
 463
 464	lockdep_assert_held(&mvm->mutex);
 465
 466	if (WARN_ON_ONCE(mvm->calibrating))
 467		return 0;
 468
 469	iwl_init_notification_wait(&mvm->notif_wait,
 470				   &calib_wait,
 471				   init_complete,
 472				   ARRAY_SIZE(init_complete),
 473				   iwl_wait_phy_db_entry,
 474				   mvm->phy_db);
 475
 476	/* Will also start the device */
 477	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
 478	if (ret) {
 479		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
 480		goto remove_notif;
 481	}
 482
 483	if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
 484		ret = iwl_mvm_send_bt_init_conf(mvm);
 485		if (ret)
 486			goto remove_notif;
 487	}
 488
 489	/* Read the NVM only at driver load time, no need to do this twice */
 490	if (read_nvm) {
 491		ret = iwl_nvm_init(mvm);
 492		if (ret) {
 493			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
 494			goto remove_notif;
 495		}
 496	}
 497
 498	/* In case we read the NVM from external file, load it to the NIC */
 499	if (mvm->nvm_file_name)
 500		iwl_mvm_load_nvm_to_nic(mvm);
 501
 502	WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans));
 503
 504	/*
  505	 * Abort after reading the NVM in case RF-kill is on; we will complete
  506	 * the init sequence later, when RF-kill switches off.
 507	 */
 508	if (iwl_mvm_is_radio_hw_killed(mvm)) {
 509		IWL_DEBUG_RF_KILL(mvm,
 510				  "jump over all phy activities due to RF kill\n");
 511		goto remove_notif;
 512	}
 513
 514	mvm->calibrating = true;
 515
 516	/* Send TX valid antennas before triggering calibrations */
 517	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
 518	if (ret)
 519		goto remove_notif;
 520
 521	ret = iwl_send_phy_cfg_cmd(mvm);
 522	if (ret) {
 523		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
 524			ret);
 525		goto remove_notif;
 526	}
 527
 528	/*
 529	 * Some things may run in the background now, but we
 530	 * just wait for the calibration complete notification.
 531	 */
 532	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
 533				    MVM_UCODE_CALIB_TIMEOUT);
 534	if (!ret)
 535		goto out;
 536
 537	if (iwl_mvm_is_radio_hw_killed(mvm)) {
 538		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
 539		ret = 0;
 540	} else {
 541		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
 542			ret);
 543	}
 544
 545	goto out;
 546
 547remove_notif:
 548	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 549out:
 550	mvm->calibrating = false;
 551	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
 552		/* we want to debug INIT and we have no NVM - fake */
 553		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
 554					sizeof(struct ieee80211_channel) +
 555					sizeof(struct ieee80211_rate),
 556					GFP_KERNEL);
 557		if (!mvm->nvm_data)
 558			return -ENOMEM;
 559		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
 560		mvm->nvm_data->bands[0].n_channels = 1;
 561		mvm->nvm_data->bands[0].n_bitrates = 1;
 562		mvm->nvm_data->bands[0].bitrates =
 563			(void *)mvm->nvm_data->channels + 1;
 564		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
 565	}
 566
 567	return ret;
 568}
 569
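/*
 * Enable latency tolerance reporting (LTR) in the firmware, but only
 * when the transport says the platform has it enabled.
 */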
 570static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
 571{
 572	struct iwl_ltr_config_cmd cmd = {
 573		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
 574	};
 575
 576	if (!mvm->trans->ltr_enabled)
 577		return 0;
 578
 579	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
 580				    sizeof(cmd), &cmd);
 581}
 582
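/*
 * SAR (specific absorption rate) support, ACPI only: the WRDS method
 * provides profile 1, EWRD up to three more (2-4) and WGDS the
 * per-geography offsets. There is no profile 0, so profile N lives in
 * sar_profiles[N - 1].
 */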
 583#ifdef CONFIG_ACPI
 584static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
 585				   union acpi_object *table,
 586				   struct iwl_mvm_sar_profile *profile,
 587				   bool enabled)
 588{
 589	int i;
 590
 591	profile->enabled = enabled;
 592
 593	for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
 594		if ((table[i].type != ACPI_TYPE_INTEGER) ||
 595		    (table[i].integer.value > U8_MAX))
 596			return -EINVAL;
 597
 598		profile->table[i] = table[i].integer.value;
 599	}
 600
 601	return 0;
 602}
 603
 604static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
 605{
 606	union acpi_object *wifi_pkg, *table, *data;
 607	bool enabled;
 608	int ret;
 609
 610	data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
 611	if (IS_ERR(data))
 612		return PTR_ERR(data);
 613
 614	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
 615					 ACPI_WRDS_WIFI_DATA_SIZE);
 616	if (IS_ERR(wifi_pkg)) {
 617		ret = PTR_ERR(wifi_pkg);
 618		goto out_free;
 619	}
 620
 621	if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
 622		ret = -EINVAL;
 623		goto out_free;
 624	}
 625
 626	enabled = !!(wifi_pkg->package.elements[1].integer.value);
 627
 628	/* position of the actual table */
 629	table = &wifi_pkg->package.elements[2];
 630
 631	/* The profile from WRDS is officially profile 1, but goes
 632	 * into sar_profiles[0] (because we don't have a profile 0).
 633	 */
 634	ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
 635				      enabled);
 636out_free:
 637	kfree(data);
 638	return ret;
 639}
 640
 641static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 642{
 643	union acpi_object *wifi_pkg, *data;
 644	bool enabled;
  645	int i, n_profiles, ret, pos;
 646
 647	data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
 648	if (IS_ERR(data))
 649		return PTR_ERR(data);
 650
 651	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
 652					 ACPI_EWRD_WIFI_DATA_SIZE);
 653	if (IS_ERR(wifi_pkg)) {
 654		ret = PTR_ERR(wifi_pkg);
 655		goto out_free;
 656	}
 657
 658	if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
 659	    (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
 660		ret = -EINVAL;
 661		goto out_free;
 662	}
 663
 664	enabled = !!(wifi_pkg->package.elements[1].integer.value);
 665	n_profiles = wifi_pkg->package.elements[2].integer.value;
 666
 667	/* in case of BIOS bug */
 668	if (n_profiles <= 0) {
 669		ret = -EINVAL;
 670		goto out_free;
 671	}
 672
  673	/* the tables start at element 3 */
  674	pos = 3;
  675	for (i = 0; i < n_profiles; i++) {
 676
 677		/* The EWRD profiles officially go from 2 to 4, but we
 678		 * save them in sar_profiles[1-3] (because we don't
 679		 * have profile 0).  So in the array we start from 1.
 680		 */
 681		ret = iwl_mvm_sar_set_profile(mvm,
 682					      &wifi_pkg->package.elements[pos],
 683					      &mvm->sar_profiles[i + 1],
 684					      enabled);
 685		if (ret < 0)
 686			break;
 687
 688		/* go to the next table */
 689		pos += ACPI_SAR_TABLE_SIZE;
 690	}
 691
 692out_free:
 693	kfree(data);
 694	return ret;
 695}
 696
 697static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
 698{
 699	union acpi_object *wifi_pkg, *data;
 700	int i, j, ret;
 701	int idx = 1;
 702
 703	data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
 704	if (IS_ERR(data))
 705		return PTR_ERR(data);
 706
 707	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
 708					 ACPI_WGDS_WIFI_DATA_SIZE);
 709	if (IS_ERR(wifi_pkg)) {
 710		ret = PTR_ERR(wifi_pkg);
 711		goto out_free;
 712	}
 713
 714	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
 715		for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
 716			union acpi_object *entry;
 717
 718			entry = &wifi_pkg->package.elements[idx++];
 719			if ((entry->type != ACPI_TYPE_INTEGER) ||
 720			    (entry->integer.value > U8_MAX)) {
 721				ret = -EINVAL;
 722				goto out_free;
 723			}
 724
 725			mvm->geo_profiles[i].values[j] = entry->integer.value;
 726		}
 727	}
 728	ret = 0;
 729out_free:
 730	kfree(data);
 731	return ret;
 732}
 733
 734int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
 735{
 736	struct iwl_dev_tx_power_cmd cmd = {
 737		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
 738	};
 739	int i, j, idx;
 740	int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
 741	int len = sizeof(cmd);
 742
 743	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
 744	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
 745		     ACPI_SAR_TABLE_SIZE);
 746
 747	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
 748		len = sizeof(cmd.v3);
 749
 750	for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
 751		struct iwl_mvm_sar_profile *prof;
 752
 753		/* don't allow SAR to be disabled (profile 0 means disable) */
 754		if (profs[i] == 0)
 755			return -EPERM;
 756
 757		/* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
 758		if (profs[i] > ACPI_SAR_PROFILE_NUM)
 759			return -EINVAL;
 760
 761		/* profiles go from 1 to 4, so decrement to access the array */
 762		prof = &mvm->sar_profiles[profs[i] - 1];
 763
 764		/* if the profile is disabled, do nothing */
 765		if (!prof->enabled) {
 766			IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
 767					profs[i]);
 768			/* if one of the profiles is disabled, we fail all */
 769			return -ENOENT;
 770		}
 771
 772		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
 773		for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
 774			idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
 775			cmd.v3.per_chain_restriction[i][j] =
 776				cpu_to_le16(prof->table[idx]);
 777			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
 778					j, prof->table[idx]);
 779		}
 780	}
 781
 782	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
 783
 784	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
 785}
 786
 787int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 788{
 789	struct iwl_geo_tx_power_profiles_resp *resp;
 790	int ret;
 791
 792	struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
 793		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
 794	};
 795	struct iwl_host_cmd cmd = {
 796		.id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
 797		.len = { sizeof(geo_cmd), },
 798		.flags = CMD_WANT_SKB,
 799		.data = { &geo_cmd },
 800	};
 801
 802	ret = iwl_mvm_send_cmd(mvm, &cmd);
 803	if (ret) {
 804		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
 805		return ret;
 806	}
 807
 808	resp = (void *)cmd.resp_pkt->data;
 809	ret = le32_to_cpu(resp->profile_idx);
 810	if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
  811		IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
  812		ret = -EIO;
 813	}
 814
 815	iwl_free_resp(&cmd);
 816	return ret;
 817}
 818
 819static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 820{
 821	struct iwl_geo_tx_power_profiles_cmd cmd = {
 822		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
 823	};
 824	int ret, i, j;
 825	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
 826
 827	ret = iwl_mvm_sar_get_wgds_table(mvm);
 828	if (ret < 0) {
 829		IWL_DEBUG_RADIO(mvm,
 830				"Geo SAR BIOS table invalid or unavailable. (%d)\n",
 831				ret);
 832		/* we don't fail if the table is not available */
 833		return 0;
 834	}
 835
 836	IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
 837
 838	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
 839		     ACPI_WGDS_TABLE_SIZE !=  ACPI_WGDS_WIFI_DATA_SIZE);
 840
 841	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
 842
 843	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
 844		struct iwl_per_chain_offset *chain =
 845			(struct iwl_per_chain_offset *)&cmd.table[i];
 846
 847		for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
 848			u8 *value;
 849
 850			value = &mvm->geo_profiles[i].values[j *
 851				ACPI_GEO_PER_CHAIN_SIZE];
 852			chain[j].max_tx_power = cpu_to_le16(value[0]);
 853			chain[j].chain_a = value[1];
 854			chain[j].chain_b = value[2];
 855			IWL_DEBUG_RADIO(mvm,
 856					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
 857					i, j, value[1], value[2], value[0]);
 858		}
 859	}
 860	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
 861}
 862
 863#else /* CONFIG_ACPI */
 864static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
 865{
 866	return -ENOENT;
 867}
 868
 869static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 870{
 871	return -ENOENT;
 872}
 873
 874static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
 875{
 876	return 0;
 877}
 878
 879int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
 880			       int prof_b)
 881{
 882	return -ENOENT;
 883}
 884
 885int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
 886{
 887	return -ENOENT;
 888}
 889#endif /* CONFIG_ACPI */
 890
 891static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
 892{
 893	int ret;
 894
 895	ret = iwl_mvm_sar_get_wrds_table(mvm);
 896	if (ret < 0) {
 897		IWL_DEBUG_RADIO(mvm,
 898				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
 899				ret);
 900		/* if not available, don't fail and don't bother with EWRD */
 901		return 0;
 902	}
 903
 904	ret = iwl_mvm_sar_get_ewrd_table(mvm);
 905	/* if EWRD is not available, we can still use WRDS, so don't fail */
 906	if (ret < 0)
 907		IWL_DEBUG_RADIO(mvm,
 908				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
 909				ret);
 910
 911	/* choose profile 1 (WRDS) as default for both chains */
 912	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
 913
 914	/* if we don't have profile 0 from BIOS, just skip it */
 915	if (ret == -ENOENT)
 916		return 0;
 917
 918	return ret;
 919}
 920
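/*
 * Load the runtime firmware. Unified images use the single-shot path
 * above; otherwise run the INIT image first, restart the transport
 * without entering low power (which preserves MFUART state), then
 * load the regular image and set up paging.
 */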
 921static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
 922{
 923	int ret;
 924
 925	if (iwl_mvm_has_unified_ucode(mvm))
 926		return iwl_run_unified_mvm_ucode(mvm, false);
 927
 928	ret = iwl_run_init_mvm_ucode(mvm, false);
 929
 930	if (ret) {
 931		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
 932
 933		if (iwlmvm_mod_params.init_dbg)
 934			return 0;
 935		return ret;
 936	}
 937
 938	/*
 939	 * Stop and start the transport without entering low power
 940	 * mode. This will save the state of other components on the
  941	 * device that are triggered by the INIT firmware (MFUART).
 942	 */
 943	_iwl_trans_stop_device(mvm->trans, false);
 944	ret = _iwl_trans_start_hw(mvm->trans, false);
 945	if (ret)
 946		return ret;
 947
 948	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
 949	if (ret)
 950		return ret;
 951
 952	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
 953}
 954
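/*
 * Full bring-up used when the interface starts: load the runtime
 * firmware, then configure antennas, PHY, BT coexistence, RSS, the
 * station mapping, PHY contexts, thermal handling, LTR, device power,
 * MCC, scan and SAR. Any failure tears the device back down.
 */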
 955int iwl_mvm_up(struct iwl_mvm *mvm)
 956{
 957	int ret, i;
 958	struct ieee80211_channel *chan;
 959	struct cfg80211_chan_def chandef;
 960
 961	lockdep_assert_held(&mvm->mutex);
 962
 963	ret = iwl_trans_start_hw(mvm->trans);
 964	if (ret)
 965		return ret;
 966
 967	ret = iwl_mvm_load_rt_fw(mvm);
 968	if (ret) {
 969		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
 970		goto error;
 971	}
 972
 973	iwl_get_shared_mem_conf(&mvm->fwrt);
 974
 975	ret = iwl_mvm_sf_update(mvm, NULL, false);
 976	if (ret)
 977		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
 978
 979	mvm->fwrt.dump.conf = FW_DBG_INVALID;
 980	/* if we have a destination, assume EARLY START */
 981	if (mvm->fw->dbg_dest_tlv)
 982		mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
 983	iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
 984
 985	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
 986	if (ret)
 987		goto error;
 988
 989	if (!iwl_mvm_has_unified_ucode(mvm)) {
 990		/* Send phy db control command and then phy db calibration */
 991		ret = iwl_send_phy_db_data(mvm->phy_db);
 992		if (ret)
 993			goto error;
 994
 995		ret = iwl_send_phy_cfg_cmd(mvm);
 996		if (ret)
 997			goto error;
 998	}
 999
1000	ret = iwl_mvm_send_bt_init_conf(mvm);
1001	if (ret)
1002		goto error;
1003
1004	/* Init RSS configuration */
1005	/* TODO - remove 22000 disablement when we have RXQ config API */
1006	if (iwl_mvm_has_new_rx_api(mvm) &&
1007	    mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
1008		ret = iwl_send_rss_cfg_cmd(mvm);
1009		if (ret) {
1010			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
1011				ret);
1012			goto error;
1013		}
1014	}
1015
1016	/* init the fw <-> mac80211 STA mapping */
1017	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
1018		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1019
1020	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1021
1022	/* reset quota debouncing buffer - 0xff will yield invalid data */
1023	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
1024
1025	ret = iwl_mvm_send_dqa_cmd(mvm);
1026	if (ret)
1027		goto error;
1028
1029	/* Add auxiliary station for scanning */
1030	ret = iwl_mvm_add_aux_sta(mvm);
1031	if (ret)
1032		goto error;
1033
1034	/* Add all the PHY contexts */
1035	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
1036	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
1037	for (i = 0; i < NUM_PHY_CTX; i++) {
1038		/*
1039		 * The channel used here isn't relevant as it's
1040		 * going to be overwritten in the other flows.
1041		 * For now use the first channel we have.
1042		 */
1043		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
1044					   &chandef, 1, 1);
1045		if (ret)
1046			goto error;
1047	}
1048
1049#ifdef CONFIG_THERMAL
1050	if (iwl_mvm_is_tt_in_fw(mvm)) {
1051		/* in order to hand responsibility for CT-kill and
1052		 * TX backoff over to the FW, we need to send an empty
1053		 * temperature reporting cmd at init time
1054		 */
1055		iwl_mvm_send_temp_report_ths_cmd(mvm);
1056	} else {
1057		/* Initialize tx backoffs to the minimal possible */
1058		iwl_mvm_tt_tx_backoff(mvm, 0);
1059	}
1060
1061	/* TODO: read the budget from BIOS / Platform NVM */
1062
1063	/*
1064	 * In case there is no budget from BIOS / Platform NVM the default
1065	 * budget should be 2000mW (cooling state 0).
1066	 */
1067	if (iwl_mvm_is_ctdp_supported(mvm)) {
1068		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
1069					   mvm->cooling_dev.cur_state);
1070		if (ret)
1071			goto error;
1072	}
1073#else
1074	/* Initialize tx backoffs to the minimal possible */
1075	iwl_mvm_tt_tx_backoff(mvm, 0);
1076#endif
1077
1078	WARN_ON(iwl_mvm_config_ltr(mvm));
1079
1080	ret = iwl_mvm_power_update_device(mvm);
1081	if (ret)
1082		goto error;
1083
1084	/*
1085	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1086	 * anyway, so don't init MCC.
1087	 */
1088	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
1089		ret = iwl_mvm_init_mcc(mvm);
1090		if (ret)
1091			goto error;
1092	}
1093
1094	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1095		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
1096		ret = iwl_mvm_config_scan(mvm);
1097		if (ret)
1098			goto error;
1099	}
1100
1101	/* allow FW/transport low power modes if not during restart */
1102	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1103		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1104
1105	ret = iwl_mvm_sar_init(mvm);
1106	if (ret)
1107		goto error;
1108
1109	ret = iwl_mvm_sar_geo_init(mvm);
1110	if (ret)
1111		goto error;
1112
1113	iwl_mvm_leds_sync(mvm);
1114
1115	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
1116	return 0;
1117 error:
1118	if (!iwlmvm_mod_params.init_dbg || !ret)
1119		iwl_mvm_stop_device(mvm);
1120	return ret;
1121}
1122
1123int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1124{
1125	int ret, i;
1126
1127	lockdep_assert_held(&mvm->mutex);
1128
1129	ret = iwl_trans_start_hw(mvm->trans);
1130	if (ret)
1131		return ret;
1132
1133	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
1134	if (ret) {
1135		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
1136		goto error;
1137	}
1138
1139	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1140	if (ret)
1141		goto error;
1142
1143	/* Send phy db control command and then phy db calibration*/
1144	ret = iwl_send_phy_db_data(mvm->phy_db);
1145	if (ret)
1146		goto error;
1147
1148	ret = iwl_send_phy_cfg_cmd(mvm);
1149	if (ret)
1150		goto error;
1151
1152	/* init the fw <-> mac80211 STA mapping */
1153	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
1154		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1155
1156	/* Add auxiliary station for scanning */
1157	ret = iwl_mvm_add_aux_sta(mvm);
1158	if (ret)
1159		goto error;
1160
1161	return 0;
1162 error:
1163	iwl_mvm_stop_device(mvm);
1164	return ret;
1165}
1166
1167void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1168				 struct iwl_rx_cmd_buffer *rxb)
1169{
1170	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1171	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
1172	u32 flags = le32_to_cpu(card_state_notif->flags);
1173
1174	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
1175			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
1176			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
1177			  (flags & CT_KILL_CARD_DISABLED) ?
1178			  "Reached" : "Not reached");
1179}
1180
1181void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1182			     struct iwl_rx_cmd_buffer *rxb)
1183{
1184	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1185	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1186
1187	IWL_DEBUG_INFO(mvm,
1188		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1189		       le32_to_cpu(mfuart_notif->installed_ver),
1190		       le32_to_cpu(mfuart_notif->external_ver),
1191		       le32_to_cpu(mfuart_notif->status),
1192		       le32_to_cpu(mfuart_notif->duration));
1193
1194	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
1195		IWL_DEBUG_INFO(mvm,
1196			       "MFUART: image size: 0x%08x\n",
1197			       le32_to_cpu(mfuart_notif->image_size));
1198}
v4.10.11
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
   9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10 * Copyright(c) 2016 Intel Deutschland GmbH
  11 *
  12 * This program is free software; you can redistribute it and/or modify
  13 * it under the terms of version 2 of the GNU General Public License as
  14 * published by the Free Software Foundation.
  15 *
  16 * This program is distributed in the hope that it will be useful, but
  17 * WITHOUT ANY WARRANTY; without even the implied warranty of
  18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  19 * General Public License for more details.
  20 *
  21 * You should have received a copy of the GNU General Public License
  22 * along with this program; if not, write to the Free Software
  23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  24 * USA
  25 *
  26 * The full GNU General Public License is included in this distribution
  27 * in the file called COPYING.
  28 *
  29 * Contact Information:
  30 *  Intel Linux Wireless <linuxwifi@intel.com>
  31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  32 *
  33 * BSD LICENSE
  34 *
  35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 
  37 * All rights reserved.
  38 *
  39 * Redistribution and use in source and binary forms, with or without
  40 * modification, are permitted provided that the following conditions
  41 * are met:
  42 *
  43 *  * Redistributions of source code must retain the above copyright
  44 *    notice, this list of conditions and the following disclaimer.
  45 *  * Redistributions in binary form must reproduce the above copyright
  46 *    notice, this list of conditions and the following disclaimer in
  47 *    the documentation and/or other materials provided with the
  48 *    distribution.
  49 *  * Neither the name Intel Corporation nor the names of its
  50 *    contributors may be used to endorse or promote products derived
  51 *    from this software without specific prior written permission.
  52 *
  53 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  54 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  55 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  56 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  57 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  58 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  59 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  63 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  64 *
  65 *****************************************************************************/
  66#include <net/mac80211.h>
  67#include <linux/netdevice.h>
  68#include <linux/acpi.h>
  69
  70#include "iwl-trans.h"
  71#include "iwl-op-mode.h"
  72#include "iwl-fw.h"
  73#include "iwl-debug.h"
  74#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
  75#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
  76#include "iwl-prph.h"
  77#include "iwl-eeprom-parse.h"
  78
  79#include "mvm.h"
  80#include "fw-dbg.h"
  81#include "iwl-phy-db.h"
  82
  83#define MVM_UCODE_ALIVE_TIMEOUT	HZ
  84#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)
  85
  86#define UCODE_VALID_OK	cpu_to_le32(0x1)
  87
  88struct iwl_mvm_alive_data {
  89	bool valid;
  90	u32 scd_base_addr;
  91};
  92
  93static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
  94{
  95	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
  96		.valid = cpu_to_le32(valid_tx_ant),
  97	};
  98
  99	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
 100	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
 101				    sizeof(tx_ant_cmd), &tx_ant_cmd);
 102}
 103
 104static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
 105{
 106	int i;
 107	struct iwl_rss_config_cmd cmd = {
 108		.flags = cpu_to_le32(IWL_RSS_ENABLE),
 109		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
 110			     IWL_RSS_HASH_TYPE_IPV4_UDP |
 111			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
 112			     IWL_RSS_HASH_TYPE_IPV6_TCP |
 113			     IWL_RSS_HASH_TYPE_IPV6_UDP |
 114			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
 115	};
 116
 117	if (mvm->trans->num_rx_queues == 1)
 118		return 0;
 119
 120	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
 121	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
 122		cmd.indirection_table[i] =
 123			1 + (i % (mvm->trans->num_rx_queues - 1));
 124	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
 125
 126	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 127}
 128
 129static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
 130{
 131	struct iwl_dqa_enable_cmd dqa_cmd = {
 132		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
 133	};
 134	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
 135	int ret;
 136
 137	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
 138	if (ret)
 139		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
 140	else
 141		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
 142
 143	return ret;
 144}
 145
 146void iwl_free_fw_paging(struct iwl_mvm *mvm)
 
 147{
 
 
 
 
 148	int i;
 149
 150	if (!mvm->fw_paging_db[0].fw_paging_block)
 151		return;
 152
 153	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
 154		struct iwl_fw_paging *paging = &mvm->fw_paging_db[i];
 155
 156		if (!paging->fw_paging_block) {
 157			IWL_DEBUG_FW(mvm,
 158				     "Paging: block %d already freed, continue to next page\n",
 159				     i);
 160
 161			continue;
 162		}
 163		dma_unmap_page(mvm->trans->dev, paging->fw_paging_phys,
 164			       paging->fw_paging_size, DMA_BIDIRECTIONAL);
 165
 166		__free_pages(paging->fw_paging_block,
 167			     get_order(paging->fw_paging_size));
 168		paging->fw_paging_block = NULL;
 169	}
 170	kfree(mvm->trans->paging_download_buf);
 171	mvm->trans->paging_download_buf = NULL;
 172	mvm->trans->paging_db = NULL;
 173
 174	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 175}
 176
 177static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
 
 178{
 179	int sec_idx, idx;
 180	u32 offset = 0;
 
 
 
 
 
 
 
 
 181
 182	/*
 183	 * find where is the paging image start point:
 184	 * if CPU2 exist and it's in paging format, then the image looks like:
 185	 * CPU1 sections (2 or more)
 186	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between CPU1 to CPU2
 187	 * CPU2 sections (not paged)
 188	 * PAGING_SEPARATOR_SECTION delimiter - separate between CPU2
 189	 * non paged to CPU2 paging sec
 190	 * CPU2 paging CSS
 191	 * CPU2 paging image (including instruction and data)
 192	 */
 193	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
 194		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
 195			sec_idx++;
 196			break;
 197		}
 198	}
 199
 200	/*
 201	 * If paging is enabled there should be at least 2 more sections left
 202	 * (one for CSS and one for Paging data)
 203	 */
 204	if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
 205		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
 206		iwl_free_fw_paging(mvm);
 207		return -EINVAL;
 208	}
 209
 210	/* copy the CSS block to the dram */
 211	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
 212		     sec_idx);
 213
 214	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
 215	       image->sec[sec_idx].data,
 216	       mvm->fw_paging_db[0].fw_paging_size);
 217
 218	IWL_DEBUG_FW(mvm,
 219		     "Paging: copied %d CSS bytes to first block\n",
 220		     mvm->fw_paging_db[0].fw_paging_size);
 221
 222	sec_idx++;
 223
 224	/*
 225	 * copy the paging blocks to the dram
 226	 * loop index start from 1 since that CSS block already copied to dram
 227	 * and CSS index is 0.
 228	 * loop stop at num_of_paging_blk since that last block is not full.
 229	 */
 230	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
 231		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
 232		       image->sec[sec_idx].data + offset,
 233		       mvm->fw_paging_db[idx].fw_paging_size);
 234
 235		IWL_DEBUG_FW(mvm,
 236			     "Paging: copied %d paging bytes to block %d\n",
 237			     mvm->fw_paging_db[idx].fw_paging_size,
 238			     idx);
 239
 240		offset += mvm->fw_paging_db[idx].fw_paging_size;
 241	}
 242
 243	/* copy the last paging block */
 244	if (mvm->num_of_pages_in_last_blk > 0) {
 245		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
 246		       image->sec[sec_idx].data + offset,
 247		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);
 248
 249		IWL_DEBUG_FW(mvm,
 250			     "Paging: copied %d pages in the last block %d\n",
 251			     mvm->num_of_pages_in_last_blk, idx);
 252	}
 253
 254	return 0;
 255}
 256
 257static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
 258				   const struct fw_img *image)
 259{
 260	struct page *block;
 261	dma_addr_t phys = 0;
 262	int blk_idx = 0;
 263	int order, num_of_pages;
 264	int dma_enabled;
 265
 266	if (mvm->fw_paging_db[0].fw_paging_block)
 267		return 0;
 268
 269	dma_enabled = is_device_dma_capable(mvm->trans->dev);
 270
 271	/* ensure BLOCK_2_EXP_SIZE is power of 2 of PAGING_BLOCK_SIZE */
 272	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
 273
 274	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
 275	mvm->num_of_paging_blk = ((num_of_pages - 1) /
 276				    NUM_OF_PAGE_PER_GROUP) + 1;
 277
 278	mvm->num_of_pages_in_last_blk =
 279		num_of_pages -
 280		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);
 281
 282	IWL_DEBUG_FW(mvm,
 283		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
 284		     mvm->num_of_paging_blk,
 285		     mvm->num_of_pages_in_last_blk);
 286
 287	/* allocate block of 4Kbytes for paging CSS */
 288	order = get_order(FW_PAGING_SIZE);
 289	block = alloc_pages(GFP_KERNEL, order);
 290	if (!block) {
 291		/* free all the previous pages since we failed */
 292		iwl_free_fw_paging(mvm);
 293		return -ENOMEM;
 294	}
 295
 296	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
 297	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;
 298
 299	if (dma_enabled) {
 300		phys = dma_map_page(mvm->trans->dev, block, 0,
 301				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
 302		if (dma_mapping_error(mvm->trans->dev, phys)) {
 303			/*
 304			 * free the previous pages and the current one since
 305			 * we failed to map_page.
 306			 */
 307			iwl_free_fw_paging(mvm);
 308			return -ENOMEM;
 309		}
 310		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
 311	} else {
 312		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
 313			blk_idx << BLOCK_2_EXP_SIZE;
 314	}
 315
 316	IWL_DEBUG_FW(mvm,
 317		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
 318		     order);
 319
 320	/*
 321	 * allocate blocks in dram.
 322	 * since that CSS allocated in fw_paging_db[0] loop start from index 1
 323	 */
 324	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
 325		/* allocate block of PAGING_BLOCK_SIZE (32K) */
 326		order = get_order(PAGING_BLOCK_SIZE);
 327		block = alloc_pages(GFP_KERNEL, order);
 328		if (!block) {
 329			/* free all the previous pages since we failed */
 330			iwl_free_fw_paging(mvm);
 331			return -ENOMEM;
 332		}
 333
 334		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
 335		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;
 336
 337		if (dma_enabled) {
 338			phys = dma_map_page(mvm->trans->dev, block, 0,
 339					    PAGE_SIZE << order,
 340					    DMA_BIDIRECTIONAL);
 341			if (dma_mapping_error(mvm->trans->dev, phys)) {
 342				/*
 343				 * free the previous pages and the current one
 344				 * since we failed to map_page.
 345				 */
 346				iwl_free_fw_paging(mvm);
 347				return -ENOMEM;
 348			}
 349			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
 350		} else {
 351			mvm->fw_paging_db[blk_idx].fw_paging_phys =
 352				PAGING_ADDR_SIG |
 353				blk_idx << BLOCK_2_EXP_SIZE;
 354		}
 355
 356		IWL_DEBUG_FW(mvm,
 357			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
 358			     order);
 359	}
 360
 361	return 0;
 362}
 363
 364static int iwl_save_fw_paging(struct iwl_mvm *mvm,
 365			      const struct fw_img *fw)
 366{
 367	int ret;
 368
 369	ret = iwl_alloc_fw_paging_mem(mvm, fw);
 370	if (ret)
 371		return ret;
 372
 373	return iwl_fill_paging_mem(mvm, fw);
 374}
 375
 376/* send paging cmd to FW in case CPU2 has paging image */
 377static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
 378{
 379	struct iwl_fw_paging_cmd paging_cmd = {
 380		.flags =
 381			cpu_to_le32(PAGING_CMD_IS_SECURED |
 382				    PAGING_CMD_IS_ENABLED |
 383				    (mvm->num_of_pages_in_last_blk <<
 384				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
 385		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
 386		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
 387	};
 388	int blk_idx, size = sizeof(paging_cmd);
 389
 390	/* A bit hard coded - but this is the old API and will be deprecated */
 391	if (!iwl_mvm_has_new_tx_api(mvm))
 392		size -= NUM_OF_FW_PAGING_BLOCKS * 4;
 393
 394	/* loop for for all paging blocks + CSS block */
 395	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
 396		dma_addr_t addr = mvm->fw_paging_db[blk_idx].fw_paging_phys;
 397
 398		addr = addr >> PAGE_2_EXP_SIZE;
 399
 400		if (iwl_mvm_has_new_tx_api(mvm)) {
 401			__le64 phy_addr = cpu_to_le64(addr);
 402
 403			paging_cmd.device_phy_addr.addr64[blk_idx] = phy_addr;
 404		} else {
 405			__le32 phy_addr = cpu_to_le32(addr);
 406
 407			paging_cmd.device_phy_addr.addr32[blk_idx] = phy_addr;
 408		}
 409	}
 410
 411	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
 412						    IWL_ALWAYS_LONG_GROUP, 0),
 413				    0, size, &paging_cmd);
 414}
 415
 416/*
 417 * Send paging item cmd to FW in case CPU2 has paging image
 418 */
 419static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
 420{
 421	int ret;
 422	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
 423		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
 424	};
 425
 426	struct iwl_fw_get_item_resp *item_resp;
 427	struct iwl_host_cmd cmd = {
 428		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
 429		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
 430		.data = { &fw_get_item_cmd, },
 431	};
 432
 433	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);
 434
 435	ret = iwl_mvm_send_cmd(mvm, &cmd);
 436	if (ret) {
 437		IWL_ERR(mvm,
 438			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
 439			ret);
 440		return ret;
 441	}
 442
 443	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
 444	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
 445		IWL_ERR(mvm,
 446			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
 447			le32_to_cpu(item_resp->item_id));
 448		ret = -EIO;
 449		goto exit;
 450	}
 451
 452	/* Add an extra page for headers */
 453	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
 454						  FW_PAGING_SIZE,
 455						  GFP_KERNEL);
 456	if (!mvm->trans->paging_download_buf) {
 457		ret = -ENOMEM;
 458		goto exit;
 459	}
 460	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
 461	mvm->trans->paging_db = mvm->fw_paging_db;
 462	IWL_DEBUG_FW(mvm,
 463		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
 464		     mvm->trans->paging_req_addr);
 
 465
 466exit:
 467	iwl_free_resp(&cmd);
 468
 469	return ret;
 470}
 471
 472static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
 473			 struct iwl_rx_packet *pkt, void *data)
 474{
 475	struct iwl_mvm *mvm =
 476		container_of(notif_wait, struct iwl_mvm, notif_wait);
 477	struct iwl_mvm_alive_data *alive_data = data;
 478	struct mvm_alive_resp_ver1 *palive1;
 479	struct mvm_alive_resp_ver2 *palive2;
 480	struct mvm_alive_resp *palive;
 481
 482	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
 483		palive1 = (void *)pkt->data;
 484
 485		mvm->support_umac_log = false;
 486		mvm->error_event_table =
 487			le32_to_cpu(palive1->error_event_table_ptr);
 488		mvm->log_event_table =
 489			le32_to_cpu(palive1->log_event_table_ptr);
 490		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);
 491
 492		alive_data->valid = le16_to_cpu(palive1->status) ==
 493				    IWL_ALIVE_STATUS_OK;
 494		IWL_DEBUG_FW(mvm,
 495			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
 496			     le16_to_cpu(palive1->status), palive1->ver_type,
 497			     palive1->ver_subtype, palive1->flags);
 498	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
 499		palive2 = (void *)pkt->data;
 500
 501		mvm->error_event_table =
 502			le32_to_cpu(palive2->error_event_table_ptr);
 503		mvm->log_event_table =
 504			le32_to_cpu(palive2->log_event_table_ptr);
 505		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
 506		mvm->umac_error_event_table =
 507			le32_to_cpu(palive2->error_info_addr);
 508		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
 509		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);
 510
 511		alive_data->valid = le16_to_cpu(palive2->status) ==
 512				    IWL_ALIVE_STATUS_OK;
 513		if (mvm->umac_error_event_table)
 514			mvm->support_umac_log = true;
 515
 516		IWL_DEBUG_FW(mvm,
 517			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
 518			     le16_to_cpu(palive2->status), palive2->ver_type,
 519			     palive2->ver_subtype, palive2->flags);
 520
 521		IWL_DEBUG_FW(mvm,
 522			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
 523			     palive2->umac_major, palive2->umac_minor);
 524	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
 525		palive = (void *)pkt->data;
 526
 527		mvm->error_event_table =
 528			le32_to_cpu(palive->error_event_table_ptr);
 529		mvm->log_event_table =
 530			le32_to_cpu(palive->log_event_table_ptr);
 531		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
 532		mvm->umac_error_event_table =
 533			le32_to_cpu(palive->error_info_addr);
 534		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
 535		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);
 536
 537		alive_data->valid = le16_to_cpu(palive->status) ==
 538				    IWL_ALIVE_STATUS_OK;
 539		if (mvm->umac_error_event_table)
 540			mvm->support_umac_log = true;
 541
 542		IWL_DEBUG_FW(mvm,
 543			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
 544			     le16_to_cpu(palive->status), palive->ver_type,
 545			     palive->ver_subtype, palive->flags);
 546
 547		IWL_DEBUG_FW(mvm,
 548			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
 549			     le32_to_cpu(palive->umac_major),
 550			     le32_to_cpu(palive->umac_minor));
 551	}
 552
 553	return true;
 554}
 555
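/*
 * Notification-wait callback used while running the INIT ucode: store
 * every CALIB_RES_NOTIF_PHY_DB result in the PHY DB and keep waiting
 * (return false) until INIT_COMPLETE_NOTIF arrives (return true).
 */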
 556static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
 557				  struct iwl_rx_packet *pkt, void *data)
 558{
 559	struct iwl_phy_db *phy_db = data;
 560
 561	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
 562		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
 563		return true;
 564	}
 565
 566	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
 567
 568	return false;
 569}
 570
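/*
 * Load the requested ucode image (or the usniffer image when a debug
 * destination requires it), start the firmware and block until the
 * ALIVE notification or a timeout.  On success, update the SDIO SF
 * allocation from the ALIVE data and set up firmware paging when the
 * image uses it; on any failure, restore the previous ucode type.
 */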
 571static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
 572					 enum iwl_ucode_type ucode_type)
 573{
 574	struct iwl_notification_wait alive_wait;
 575	struct iwl_mvm_alive_data alive_data;
 576	const struct fw_img *fw;
 577	int ret, i;
 578	enum iwl_ucode_type old_type = mvm->cur_ucode;
 579	static const u16 alive_cmd[] = { MVM_ALIVE };
 580	struct iwl_sf_region st_fwrd_space;
 581
 582	if (ucode_type == IWL_UCODE_REGULAR &&
 583	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
 584	    !(fw_has_capa(&mvm->fw->ucode_capa,
 585			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
 586		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
 587	else
 588		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
 589	if (WARN_ON(!fw))
 590		return -EINVAL;
 591	mvm->cur_ucode = ucode_type;
 592	mvm->ucode_loaded = false;
 593
 594	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
 595				   alive_cmd, ARRAY_SIZE(alive_cmd),
 596				   iwl_alive_fn, &alive_data);
 597
 598	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
 599	if (ret) {
 600		mvm->cur_ucode = old_type;
 601		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
 602		return ret;
 603	}
 604
 605	/*
 606	 * Some things may run in the background now, but we
 607	 * just wait for the ALIVE notification here.
 608	 */
 609	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
 610				    MVM_UCODE_ALIVE_TIMEOUT);
 611	if (ret) {
 612		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
 613			IWL_ERR(mvm,
 614				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
 615				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
 616				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
 617		mvm->cur_ucode = old_type;
 618		return ret;
 619	}
 620
 621	if (!alive_data.valid) {
 622		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
 623		mvm->cur_ucode = old_type;
 624		return -EIO;
 625	}
 626
 627	/*
 628	 * Update the SDIO allocation according to the pointer we got in
 629	 * the ALIVE notification.
 630	 */
 631	st_fwrd_space.addr = mvm->sf_space.addr;
 632	st_fwrd_space.size = mvm->sf_space.size;
 633	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
 634	if (ret) {
 635		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
 636		return ret;
 637	}
 638
 639	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
 640
 641	/*
 642	 * Configure and operate the FW paging mechanism.
 643	 * The driver configures the paging flow only once; the CPU2 paging
 644	 * image is included in the IWL_UCODE_INIT image.
 645	 */
 646	if (fw->paging_mem_size) {
 647		/*
 648		 * When DMA is not enabled, the driver needs to copy / write
 649		 * the downloaded / uploaded page to / from the SMEM.
 650		 * This gets the location of the place where the pages are
 651		 * stored.
 652		 */
 653		if (!is_device_dma_capable(mvm->trans->dev)) {
 654			ret = iwl_trans_get_paging_item(mvm);
 655			if (ret) {
 656				IWL_ERR(mvm, "failed to get FW paging item\n");
 657				return ret;
 658			}
 659		}
 660
 661		ret = iwl_save_fw_paging(mvm, fw);
 662		if (ret) {
 663			IWL_ERR(mvm, "failed to save the FW paging image\n");
 664			return ret;
 665		}
 666
 667		ret = iwl_send_paging_cmd(mvm, fw);
 668		if (ret) {
 669			IWL_ERR(mvm, "failed to send the paging cmd\n");
 670			iwl_free_fw_paging(mvm);
 671			return ret;
 672		}
 673	}
 674
 675	/*
 676	 * Note: all the queues are enabled as part of the interface
 677	 * initialization, but in firmware restart scenarios they
 678	 * could be stopped, so wake them up. In firmware restart,
 679	 * mac80211 will have the queues stopped as well until the
 680	 * reconfiguration completes. During normal startup, they
 681	 * will be empty.
 682	 */
 683
 684	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
 685	if (iwl_mvm_is_dqa_supported(mvm))
 686		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
 687	else
 688		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
 689
 690	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
 691		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
 692
 693	mvm->ucode_loaded = true;
 694
 695	return 0;
 696}
 697
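/*
 * Send PHY_CONFIGURATION_CMD with the PHY configuration and the
 * default calibration triggers for the currently running ucode.
 */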
 698static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
 699{
 700	struct iwl_phy_cfg_cmd phy_cfg_cmd;
 701	enum iwl_ucode_type ucode_type = mvm->cur_ucode;
 702
 703	/* Set parameters */
 704	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
 705	phy_cfg_cmd.calib_control.event_trigger =
 706		mvm->fw->default_calib[ucode_type].event_trigger;
 707	phy_cfg_cmd.calib_control.flow_trigger =
 708		mvm->fw->default_calib[ucode_type].flow_trigger;
 709
 710	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
 711		       phy_cfg_cmd.phy_cfg);
 712
 713	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
 714				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
 715}
 716
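/*
 * Run the INIT ucode: load it, read the NVM (only at driver load),
 * send the antenna and PHY configuration and wait for the calibration
 * results.  Returns 1 (not an error) when the flow is aborted by
 * RF kill; the init sequence is then completed once RF kill is lifted.
 */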
 717int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 718{
 719	struct iwl_notification_wait calib_wait;
 720	static const u16 init_complete[] = {
 721		INIT_COMPLETE_NOTIF,
 722		CALIB_RES_NOTIF_PHY_DB
 723	};
 724	int ret;
 725
 726	lockdep_assert_held(&mvm->mutex);
 727
 728	if (WARN_ON_ONCE(mvm->calibrating))
 729		return 0;
 730
 731	iwl_init_notification_wait(&mvm->notif_wait,
 732				   &calib_wait,
 733				   init_complete,
 734				   ARRAY_SIZE(init_complete),
 735				   iwl_wait_phy_db_entry,
 736				   mvm->phy_db);
 737
 738	/* Will also start the device */
 739	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
 740	if (ret) {
 741		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
 742		goto error;
 743	}
 744
 745	ret = iwl_send_bt_init_conf(mvm);
 746	if (ret)
 747		goto error;
 748
 749	/* Read the NVM only at driver load time, no need to do this twice */
 750	if (read_nvm) {
 751		/* Read nvm */
 752		ret = iwl_nvm_init(mvm, true);
 753		if (ret) {
 754			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
 755			goto error;
 756		}
 757	}
 758
 759	/* In case we read the NVM from external file, load it to the NIC */
 760	if (mvm->nvm_file_name)
 761		iwl_mvm_load_nvm_to_nic(mvm);
 762
 763	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
 764	WARN_ON(ret);
 765
 766	/*
 767	 * Abort after reading the NVM in case RF kill is on; we will
 768	 * complete the init sequence later, once RF kill switches off.
 769	 */
 770	if (iwl_mvm_is_radio_hw_killed(mvm)) {
 771		IWL_DEBUG_RF_KILL(mvm,
 772				  "jump over all phy activities due to RF kill\n");
 773		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 774		ret = 1;
 775		goto out;
 776	}
 777
 778	mvm->calibrating = true;
 779
 780	/* Send TX valid antennas before triggering calibrations */
 781	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
 782	if (ret)
 783		goto error;
 784
 785	/*
 786	 * Send the phy configuration command to the INIT uCode
 787	 * to start the 16.0 uCode init image's internal calibrations.
 788	 */
 789	ret = iwl_send_phy_cfg_cmd(mvm);
 790	if (ret) {
 791		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
 792			ret);
 793		goto error;
 794	}
 795
 796	/*
 797	 * Some things may run in the background now, but we
 798	 * just wait for the calibration complete notification.
 799	 */
 800	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
 801			MVM_UCODE_CALIB_TIMEOUT);
 802
 803	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
 804		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
 805		ret = 1;
 806	}
 807	goto out;
 808
 809error:
 810	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
 811out:
 812	mvm->calibrating = false;
 813	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
 814		/* we want to debug INIT and we have no NVM - fake one */
 815		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
 816					sizeof(struct ieee80211_channel) +
 817					sizeof(struct ieee80211_rate),
 818					GFP_KERNEL);
 819		if (!mvm->nvm_data)
 820			return -ENOMEM;
 821		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
 822		mvm->nvm_data->bands[0].n_channels = 1;
 823		mvm->nvm_data->bands[0].n_bitrates = 1;
 824		mvm->nvm_data->bands[0].bitrates =
 825			(void *)mvm->nvm_data->channels + 1;
 826		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
 827	}
 828
 829	return ret;
 830}
 831
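/*
 * Parse the SHARED_MEM_CFG response in its a000 layout into
 * mvm->shared_mem_cfg (TX/RX FIFO and internal TX FIFO sizes).
 */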
 832static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
 833					  struct iwl_rx_packet *pkt)
 834{
 835	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
 836	int i;
 837
 838	mvm->shared_mem_cfg.num_txfifo_entries =
 839		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
 840	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
 841		mvm->shared_mem_cfg.txfifo_size[i] =
 842			le32_to_cpu(mem_cfg->txfifo_size[i]);
 843	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
 844		mvm->shared_mem_cfg.rxfifo_size[i] =
 845			le32_to_cpu(mem_cfg->rxfifo_size[i]);
 846
 847	BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
 848		     sizeof(mem_cfg->internal_txfifo_size));
 849
 850	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
 851	     i++)
 852		mvm->shared_mem_cfg.internal_txfifo_size[i] =
 853			le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
 854}
 855
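/*
 * Parse the SHARED_MEM_CFG response in its v1 (pre-a000) layout; the
 * internal TX FIFO sizes are only present when the firmware advertises
 * the extended shared memory config capability.
 */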
 856static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
 857				     struct iwl_rx_packet *pkt)
 858{
 859	struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
 860	int i;
 861
 862	mvm->shared_mem_cfg.num_txfifo_entries =
 863		ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
 864	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
 865		mvm->shared_mem_cfg.txfifo_size[i] =
 866			le32_to_cpu(mem_cfg->txfifo_size[i]);
 867	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
 868		mvm->shared_mem_cfg.rxfifo_size[i] =
 869			le32_to_cpu(mem_cfg->rxfifo_size[i]);
 870
 871	/* the new API has more data, from the rxfifo_addr field onwards */
 872	if (fw_has_capa(&mvm->fw->ucode_capa,
 873			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 874		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
 875			     sizeof(mem_cfg->internal_txfifo_size));
 876
 877		for (i = 0;
 878		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
 879		     i++)
 880			mvm->shared_mem_cfg.internal_txfifo_size[i] =
 881				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
 882	}
 883}
 884
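/*
 * Query the firmware for the shared memory offsets/sizes and dispatch
 * the response to the parser matching the device's TX API.
 */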
 885static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
 886{
 887	struct iwl_host_cmd cmd = {
 888		.flags = CMD_WANT_SKB,
 889		.data = { NULL, },
 890		.len = { 0, },
 891	};
 892	struct iwl_rx_packet *pkt;
 893
 894	lockdep_assert_held(&mvm->mutex);
 895
 896	if (fw_has_capa(&mvm->fw->ucode_capa,
 897			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
 898		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
 899	else
 900		cmd.id = SHARED_MEM_CFG;
 901
 902	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
 903		return;
 904
 905	pkt = cmd.resp_pkt;
 906	if (iwl_mvm_has_new_tx_api(mvm))
 907		iwl_mvm_parse_shared_mem_a000(mvm, pkt);
 908	else
 909		iwl_mvm_parse_shared_mem(mvm, pkt);
 910
 911	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
 912
 913	iwl_free_resp(&cmd);
 914}
 915
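/*
 * Enable LTR (Latency Tolerance Reporting) in the firmware, but only
 * if the transport layer reports that LTR is enabled on the platform.
 */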
 916static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
 917{
 918	struct iwl_ltr_config_cmd cmd = {
 919		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
 920	};
 921
 922	if (!mvm->trans->ltr_enabled)
 923		return 0;
 924
 925	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
 926				    sizeof(cmd), &cmd);
 927}
 928
 929#define ACPI_WRDS_METHOD	"WRDS"
 930#define ACPI_WRDS_WIFI		(0x07)
 931#define ACPI_WRDS_TABLE_SIZE	10
 932
 933struct iwl_mvm_sar_table {
 934	bool enabled;
 935	u8 values[ACPI_WRDS_TABLE_SIZE];
 936};
 937
 938#ifdef CONFIG_ACPI
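/*
 * Walk the package returned by the ACPI WRDS method, find the WiFi
 * domain entry and copy its enabled flag and per-chain/sub-band power
 * limits into @sar_table.
 */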
 939static int iwl_mvm_sar_get_wrds(struct iwl_mvm *mvm, union acpi_object *wrds,
 940				struct iwl_mvm_sar_table *sar_table)
 941{
 942	union acpi_object *data_pkg;
 943	u32 i;
 944
 945	/* We need at least two packages, one for the revision and one
 946	 * for the data itself.  Also check that the revision is valid
 947	 * (i.e. it is an integer set to 0).
 948	 */
 949	if (wrds->type != ACPI_TYPE_PACKAGE ||
 950	    wrds->package.count < 2 ||
 951	    wrds->package.elements[0].type != ACPI_TYPE_INTEGER ||
 952	    wrds->package.elements[0].integer.value != 0) {
 953		IWL_DEBUG_RADIO(mvm, "Unsupported wrds structure\n");
 954		return -EINVAL;
 955	}
 956
 957	/* loop through all the packages to find the one for WiFi */
 958	for (i = 1; i < wrds->package.count; i++) {
 959		union acpi_object *domain;
 960
 961		data_pkg = &wrds->package.elements[i];
 962
 963		/* Skip anything that is not a package with the right
 964		 * number of elements (i.e. domain_type, enabled/disabled
 965		 * plus the SAR table size).
 966		 */
 967		if (data_pkg->type != ACPI_TYPE_PACKAGE ||
 968		    data_pkg->package.count != ACPI_WRDS_TABLE_SIZE + 2)
 969			continue;
 970
 971		domain = &data_pkg->package.elements[0];
 972		if (domain->type == ACPI_TYPE_INTEGER &&
 973		    domain->integer.value == ACPI_WRDS_WIFI)
 974			break;
 975
 976		data_pkg = NULL;
 977	}
 978
 979	if (!data_pkg)
 980		return -ENOENT;
 981
 982	if (data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
 983		return -EINVAL;
 984
 985	sar_table->enabled = !!(data_pkg->package.elements[1].integer.value);
 986
 987	for (i = 0; i < ACPI_WRDS_TABLE_SIZE; i++) {
 988		union acpi_object *entry;
 989
 990		entry = &data_pkg->package.elements[i + 2];
 991		if ((entry->type != ACPI_TYPE_INTEGER) ||
 992		    (entry->integer.value > U8_MAX))
 993			return -EINVAL;
 994
 995		sar_table->values[i] = entry->integer.value;
 996	}
 997
 998	return 0;
 999}
1000
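/*
 * Evaluate the ACPI WRDS method below the device's root port handle
 * and parse the result; -ENOENT means no usable table was found.
 */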
1001static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
1002				 struct iwl_mvm_sar_table *sar_table)
1003{
1004	acpi_handle root_handle;
1005	acpi_handle handle;
1006	struct acpi_buffer wrds = {ACPI_ALLOCATE_BUFFER, NULL};
1007	acpi_status status;
1008	int ret;
1009
1010	root_handle = ACPI_HANDLE(mvm->dev);
1011	if (!root_handle) {
1012		IWL_DEBUG_RADIO(mvm,
1013				"Could not retrieve root port ACPI handle\n");
1014		return -ENOENT;
1015	}
1016
1017	/* Get the method's handle */
1018	status = acpi_get_handle(root_handle, (acpi_string)ACPI_WRDS_METHOD,
1019				 &handle);
1020	if (ACPI_FAILURE(status)) {
1021		IWL_DEBUG_RADIO(mvm, "WRDS method not found\n");
1022		return -ENOENT;
1023	}
1024
1025	/* Call WRDS with no arguments */
1026	status = acpi_evaluate_object(handle, NULL, NULL, &wrds);
1027	if (ACPI_FAILURE(status)) {
1028		IWL_DEBUG_RADIO(mvm, "WRDS invocation failed (0x%x)\n", status);
1029		return -ENOENT;
1030	}
1031
1032	ret = iwl_mvm_sar_get_wrds(mvm, wrds.pointer, sar_table);
1033	kfree(wrds.pointer);
1034
1035	return ret;
1036}
1037#else /* CONFIG_ACPI */
1038static int iwl_mvm_sar_get_table(struct iwl_mvm *mvm,
1039				 struct iwl_mvm_sar_table *sar_table)
1040{
1041	return -ENOENT;
1042}
1043#endif /* CONFIG_ACPI */
1044
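/*
 * Read the SAR table from the BIOS (ACPI WRDS) and, when it is marked
 * enabled, send the per-chain TX power limits to the firmware with
 * REDUCE_TX_POWER_CMD.  A missing or invalid table is not an error.
 */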
1045static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
1046{
1047	struct iwl_mvm_sar_table sar_table;
1048	struct iwl_dev_tx_power_cmd cmd = {
1049		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
1050	};
1051	int ret, i, j, idx;
1052	int len = sizeof(cmd);
1053
1054	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
1055		len = sizeof(cmd.v3);
1056
1057	ret = iwl_mvm_sar_get_table(mvm, &sar_table);
1058	if (ret < 0) {
1059		IWL_DEBUG_RADIO(mvm,
1060				"SAR BIOS table invalid or unavailable. (%d)\n",
1061				ret);
1062		/* we don't fail if the table is not available */
1063		return 0;
1064	}
1065
1066	if (!sar_table.enabled)
1067		return 0;
1068
1069	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
1070
1071	BUILD_BUG_ON(IWL_NUM_CHAIN_LIMITS * IWL_NUM_SUB_BANDS !=
1072		     ACPI_WRDS_TABLE_SIZE);
1073
1074	for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
1075		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
1076		for (j = 0; j < IWL_NUM_SUB_BANDS; j++) {
1077			idx = (i * IWL_NUM_SUB_BANDS) + j;
1078			cmd.v3.per_chain_restriction[i][j] =
1079				cpu_to_le16(sar_table.values[idx]);
1080			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
1081					j, sar_table.values[idx]);
1082		}
1083	}
1084
1085	ret = iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
1086	if (ret)
1087		IWL_ERR(mvm, "failed to set per-chain TX power: %d\n", ret);
1088
1089	return ret;
1090}
1091
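/*
 * Full bring-up of the runtime firmware: run the INIT ucode if it
 * hasn't run yet, restart the transport, load IWL_UCODE_REGULAR and
 * then send the initial configuration (shared memory, BT coex, PHY DB,
 * RSS, stations, PHY contexts, thermal, LTR, power, MCC, scan and SAR).
 */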
1092int iwl_mvm_up(struct iwl_mvm *mvm)
1093{
1094	int ret, i;
1095	struct ieee80211_channel *chan;
1096	struct cfg80211_chan_def chandef;
1097
1098	lockdep_assert_held(&mvm->mutex);
1099
1100	ret = iwl_trans_start_hw(mvm->trans);
1101	if (ret)
1102		return ret;
1103
1104	/*
1105	 * If we haven't completed the run of the init ucode during
1106	 * module loading, load init ucode now
1107	 * (for example, if we were in RFKILL)
1108	 */
1109	ret = iwl_run_init_mvm_ucode(mvm, false);
1110
1111	if (iwlmvm_mod_params.init_dbg)
1112		return 0;
1113
1114	if (ret) {
1115		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
1116		/* this can't happen */
1117		if (WARN_ON(ret > 0))
1118			ret = -ERFKILL;
1119		goto error;
1120	}
1121
1122	/*
1123	 * Stop and start the transport without entering low power
1124	 * mode. This will save the state of other components on the
1125	 * device that are triggered by the INIT firmware (MFUART).
1126	 */
1127	_iwl_trans_stop_device(mvm->trans, false);
1128	ret = _iwl_trans_start_hw(mvm->trans, false);
1129	if (ret)
1130		goto error;
1131
1132	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
1133	if (ret) {
1134		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
1135		goto error;
1136	}
1137
1138	iwl_mvm_get_shared_mem_conf(mvm);
1139
1140	ret = iwl_mvm_sf_update(mvm, NULL, false);
1141	if (ret)
1142		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
1143
1144	mvm->fw_dbg_conf = FW_DBG_INVALID;
1145	/* if we have a destination, assume EARLY START */
1146	if (mvm->fw->dbg_dest_tlv)
1147		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
1148	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);
1149
1150	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1151	if (ret)
1152		goto error;
1153
1154	ret = iwl_send_bt_init_conf(mvm);
1155	if (ret)
1156		goto error;
1157
1158	/* Send phy db control command and then phy db calibration */
1159	ret = iwl_send_phy_db_data(mvm->phy_db);
1160	if (ret)
1161		goto error;
1162
1163	ret = iwl_send_phy_cfg_cmd(mvm);
1164	if (ret)
1165		goto error;
1166
1167	/* Init RSS configuration */
1168	if (iwl_mvm_has_new_rx_api(mvm)) {
1169		ret = iwl_send_rss_cfg_cmd(mvm);
1170		if (ret) {
1171			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
1172				ret);
1173			goto error;
1174		}
1175	}
1176
1177	/* init the fw <-> mac80211 STA mapping */
1178	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
1179		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1180
1181	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;
1182
1183	/* reset quota debouncing buffer - 0xff will yield invalid data */
1184	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
1185
1186	/* Enable DQA-mode if required */
1187	if (iwl_mvm_is_dqa_supported(mvm)) {
1188		ret = iwl_mvm_send_dqa_cmd(mvm);
1189		if (ret)
1190			goto error;
1191	} else {
1192		IWL_DEBUG_FW(mvm, "Working in non-DQA mode\n");
1193	}
1194
1195	/* Add auxiliary station for scanning */
1196	ret = iwl_mvm_add_aux_sta(mvm);
1197	if (ret)
1198		goto error;
1199
1200	/* Add all the PHY contexts */
1201	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
1202	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
1203	for (i = 0; i < NUM_PHY_CTX; i++) {
1204		/*
1205		 * The channel used here isn't relevant as it's
1206		 * going to be overwritten in the other flows.
1207		 * For now use the first channel we have.
1208		 */
1209		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
1210					   &chandef, 1, 1);
1211		if (ret)
1212			goto error;
1213	}
1214
1215#ifdef CONFIG_THERMAL
1216	if (iwl_mvm_is_tt_in_fw(mvm)) {
1217		/* In order to hand the responsibility for CT-kill and
1218		 * TX backoff over to the FW, we need to send an empty
1219		 * temperature reporting cmd during init time.
1220		 */
1221		iwl_mvm_send_temp_report_ths_cmd(mvm);
1222	} else {
1223		/* Initialize tx backoffs to the minimal possible */
1224		iwl_mvm_tt_tx_backoff(mvm, 0);
1225	}
1226
1227	/* TODO: read the budget from BIOS / Platform NVM */
1228	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) {
1229		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
1230					   mvm->cooling_dev.cur_state);
1231		if (ret)
1232			goto error;
1233	}
1234#else
1235	/* Initialize tx backoffs to the minimal possible */
1236	iwl_mvm_tt_tx_backoff(mvm, 0);
1237#endif
1238
1239	WARN_ON(iwl_mvm_config_ltr(mvm));
1240
1241	ret = iwl_mvm_power_update_device(mvm);
1242	if (ret)
1243		goto error;
1244
1245	/*
1246	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
1247	 * anyway, so don't init MCC.
1248	 */
1249	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
1250		ret = iwl_mvm_init_mcc(mvm);
1251		if (ret)
1252			goto error;
1253	}
1254
1255	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
1256		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
1257		ret = iwl_mvm_config_scan(mvm);
1258		if (ret)
1259			goto error;
1260	}
1261
1262	if (iwl_mvm_is_csum_supported(mvm) &&
1263	    mvm->cfg->features & NETIF_F_RXCSUM)
1264		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);
1265
1266	/* allow FW/transport low power modes if not during restart */
1267	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1268		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
1269
1270	ret = iwl_mvm_sar_init(mvm);
1271	if (ret)
1272		goto error;
1273
1274	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
1275	return 0;
1276 error:
1277	iwl_mvm_stop_device(mvm);
1278	return ret;
1279}
1280
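/*
 * Bring up the WoWLAN firmware when entering D3 (suspend): a reduced
 * version of the iwl_mvm_up() flow (antenna config, PHY DB/config and
 * the auxiliary station only).
 */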
1281int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
1282{
1283	int ret, i;
1284
1285	lockdep_assert_held(&mvm->mutex);
1286
1287	ret = iwl_trans_start_hw(mvm->trans);
1288	if (ret)
1289		return ret;
1290
1291	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
1292	if (ret) {
1293		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
1294		goto error;
1295	}
1296
1297	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
1298	if (ret)
1299		goto error;
1300
1301	/* Send phy db control command and then phy db calibration */
1302	ret = iwl_send_phy_db_data(mvm->phy_db);
1303	if (ret)
1304		goto error;
1305
1306	ret = iwl_send_phy_cfg_cmd(mvm);
1307	if (ret)
1308		goto error;
1309
1310	/* init the fw <-> mac80211 STA mapping */
1311	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
1312		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
1313
1314	/* Add auxiliary station for scanning */
1315	ret = iwl_mvm_add_aux_sta(mvm);
1316	if (ret)
1317		goto error;
1318
1319	return 0;
1320 error:
1321	iwl_mvm_stop_device(mvm);
1322	return ret;
1323}
1324
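/* Log the HW/SW RF-kill and CT-kill state reported by the firmware. */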
1325void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
1326				 struct iwl_rx_cmd_buffer *rxb)
1327{
1328	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1329	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
1330	u32 flags = le32_to_cpu(card_state_notif->flags);
1331
1332	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
1333			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
1334			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
1335			  (flags & CT_KILL_CARD_DISABLED) ?
1336			  "Reached" : "Not reached");
1337}
1338
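/* Log the MFUART image versions and load status reported by the firmware. */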
1339void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
1340			     struct iwl_rx_cmd_buffer *rxb)
1341{
1342	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1343	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
1344
1345	IWL_DEBUG_INFO(mvm,
1346		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1347		       le32_to_cpu(mfuart_notif->installed_ver),
1348		       le32_to_cpu(mfuart_notif->external_ver),
1349		       le32_to_cpu(mfuart_notif->status),
1350		       le32_to_cpu(mfuart_notif->duration));
1351}