/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * an abstraction of the underlying HW to the upper layer. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless, but
 * close to it.
 * There is an implementation for each supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe function calls the transport layer's allocation
 *	   function. Of course this function is bus specific.
 *	3) This allocation function will spawn the upper layer, which will
 *	   register with mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
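
/*
 * Illustrative sketch (not part of the driver): how an op_mode would
 * typically drive steps 4) and 5) above using the inline wrappers declared
 * later in this header. The function name my_op_mode_start() and the
 * firmware image pointer my_fw_img are hypothetical.
 *
 *	static int my_op_mode_start(struct iwl_trans *trans,
 *				    const struct fw_img *my_fw_img)
 *	{
 *		int ret = iwl_trans_start_hw(trans);	// step 4: start_hw
 *
 *		if (ret)
 *			return ret;
 *
 *		ret = iwl_trans_start_fw(trans, my_fw_img, false); // start_fw
 *		if (ret)
 *			iwl_trans_stop_device(trans);	// step 5 on failure
 *		return ret;
 *	}
 */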

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
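
/*
 * Illustrative sketch (not part of the driver): a notification handler
 * would typically bound-check against iwl_rx_packet_payload_len() before
 * touching pkt->data. The payload layout struct my_notif is hypothetical.
 *
 *	struct my_notif *notif;
 *
 *	if (iwl_rx_packet_payload_len(pkt) < sizeof(*notif))
 *		return;		// truncated packet
 *	notif = (void *)pkt->data;
 */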

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send this command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
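
/*
 * Worked size note: assuming a 4-byte struct iwl_cmd_header and an 8-byte
 * struct iwl_cmd_header_wide, both arms of the union above come to the
 * same total -- 4 + 320 and 8 + (320 - 8 + 4) are both 324 bytes -- which
 * is therefore what TFD_MAX_PAYLOAD_SIZE evaluates to.
 */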

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
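
/*
 * Illustrative sketch (not part of the driver): sending a synchronous host
 * command with CMD_WANT_SKB and releasing the response afterwards, using
 * iwl_trans_send_cmd() declared later in this header. The command id
 * MY_CMD and the payload my_cmd_data are hypothetical.
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = MY_CMD,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &my_cmd_data, },
 *		.len = { sizeof(my_cmd_data), },
 *	};
 *	int ret;
 *
 *	ret = iwl_trans_send_cmd(trans, &hcmd);
 *	if (!ret) {
 *		// inspect hcmd.resp_pkt here ...
 *		iwl_free_resp(&hcmd);	// mandatory with CMD_WANT_SKB
 *	}
 */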

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}
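
/*
 * Illustrative sketch (not part of the driver): an RX handler receives a
 * struct iwl_rx_cmd_buffer and reaches the packet through rxb_addr():
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 * A handler that needs the page beyond the callback takes a reference
 * with rxb_steal_page() instead of letting the transport recycle it.
 */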

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
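
/*
 * IWL_MASK(lo, hi) builds a mask with bits lo..hi (inclusive) set, e.g.
 * IWL_MASK(0, 13) == 0x00003FFF and IWL_MASK(16, 21) == 0x003F0000.
 */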

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}
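
/*
 * For reference: with 4 KiB pages, get_order() above yields 0 for
 * IWL_AMSDU_2K and IWL_AMSDU_4K, 1 for IWL_AMSDU_8K and 2 for
 * IWL_AMSDU_12K (12 KiB is rounded up to four pages).
 */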

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 12 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
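
/*
 * Illustrative sketch (not part of the driver): op modes build
 * command-name tables with these helpers, for debugging only. The array
 * and command names below are hypothetical; the pattern mirrors what the
 * op modes do:
 *
 *	static const struct iwl_hcmd_names my_legacy_names[] = {
 *		HCMD_NAME(MY_FIRST_CMD),
 *		HCMD_NAME(MY_OTHER_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr my_groups[] = {
 *		[0x0] = HCMD_ARR(my_legacy_names),
 *	};
 *
 * Each array must be sorted by cmd_id, see
 * iwl_cmd_groups_verify_sorted() below.
 */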

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be used as the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
};
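
/*
 * Illustrative sketch (not part of the driver): the op_mode fills this
 * structure once at start-up and hands it to iwl_trans_configure()
 * (declared below). The values and the my_groups array are hypothetical:
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.cmd_fifo = 7,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = my_groups,
 *		.command_groups_size = ARRAY_SIZE(my_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */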

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kick a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if
 *	&start_hw() was called but not &start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets up to ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 __le16 flags, u8 sta_id, u8 tid,
			 int cmd_id, int size,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	void (*sw_reset)(struct iwl_trans *trans);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for an NMI interrupt: HZ / 4 jiffies is a quarter second */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: physical (DMA) address of the block/page
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @error_event_table_tlv_status: bitmap that indicates what error table
 *	pointers were received via TLV. Uses &enum iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than
 *	&IWL_FW_INI_DOMAIN_ALWAYS_ON
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN is 8 bytes located at offset 12, the size is 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
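
/* With the values above, IWL_FIRST_TB_SIZE_ALIGN is ALIGN(20, 64) == 64. */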

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: 1-st empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr:  physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};

/**
 * struct iwl_trans_txqs - transport tx queues data
 *
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;
};

/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @txqs: transport tx queues data.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof(pointer) */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return 0;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    __le16 flags, u8 sta_id, u8 tid,
		    int cmd_id, int size,
		    unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_id, tid,
				     cmd_id, size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
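
/*
 * Illustrative sketch (not part of the driver): enabling a generic AC
 * queue after fw_alive; the queue index and fifo number below are
 * hypothetical, the watchdog timeout comes from the op_mode:
 *
 *	iwl_trans_ac_txq_enable(trans, 9, 3, queue_wdg_timeout);
 */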

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
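
/*
 * Illustrative sketch (not part of the driver): reading a DWORD-sized
 * structure out of device SRAM; struct my_error_table and base_addr are
 * hypothetical. The BUILD_BUG_ON above catches, at compile time, buffers
 * whose size is not a multiple of sizeof(u32) when the size is a constant:
 *
 *	struct my_error_table my_table;
 *
 *	iwl_trans_read_mem_bytes(trans, base_addr, &my_table,
 *				 sizeof(my_table));
 */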

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
	if (trans->ops->sw_reset)
		trans->ops->sw_reset(trans);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  unsigned int cmd_pool_size,
				  unsigned int cmd_pool_align);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */