/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of
 * this kind, but only mechanisms to make the HW do something. It is not
 * completely stateless but close to it.
 * There is one implementation for each supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe function calls the transport layer's allocation
 *	   function. This function is, of course, bus specific.
 *	3) The allocation function spawns the upper layer, which registers
 *	   with mac80211.
 *
 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
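/*
 * Example (not in the original file): a minimal sketch of the op_mode
 * start/stop flow described above, using the inline wrappers defined
 * later in this file. "img" is a placeholder for a firmware image the
 * op_mode selected:
 *
 *	ret = iwl_trans_start_hw(trans);
 *	if (!ret)
 *		ret = iwl_trans_start_fw(trans, img, false);
 *	...
 *	iwl_trans_stop_device(trans);
 */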

/* default preset 0 (start from bit 16) */
#define IWL_FW_DBG_DOMAIN_POS	16
#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
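/*
 * Example (not in the original file): a minimal sketch of how the
 * helpers above and the FH_RSCSR_* masks can be used to pick apart
 * len_n_flags of a received packet; "pkt" is assumed to point to a
 * valid, completed RX buffer:
 *
 *	u32 len = iwl_rx_packet_payload_len(pkt);
 *	u32 rxq = (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
 *		  FH_RSCSR_RXQ_POS;
 */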

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_BLOCK_TXQS: Block TXQs while the command is executing.
 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
 *	SUSPEND and RESUME commands. We are in D3 mode when we set
 *	trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_BLOCK_TXQS		= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
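/*
 * Example (not in the original file): a minimal sketch of building a
 * synchronous host command that wants the response back. The command
 * id "HYPOTHETICAL_CMD" and payload "buf" are placeholders; a large
 * chunk could additionally set IWL_HCMD_DFL_NOCOPY in dataflags[0]:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = HYPOTHETICAL_CMD,
 *		.flags = CMD_WANT_SKB,
 *		.data = { buf, },
 *		.len = { sizeof(*buf), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		process(hcmd.resp_pkt);
 *		iwl_free_resp(&hcmd);
 *	}
 */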

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
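/*
 * Example (not in the original file): IWL_MASK builds an inclusive bit
 * mask, e.g. IWL_MASK(0, 13) == 0x00003FFF and IWL_MASK(16, 21) ==
 * 0x003F0000.
 */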

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	1

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
 *	e.g. for testing
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
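/*
 * Example (not in the original file): a sketch of how an op_mode
 * typically uses these macros to build the command-name tables that
 * iwl_get_cmd_string() consults. The command ids are placeholders;
 * each array must be sorted by cmd_id (see
 * iwl_cmd_groups_verify_sorted()):
 *
 *	static const struct iwl_hcmd_names example_legacy_names[] = {
 *		HCMD_NAME(EXAMPLE_CMD_A),
 *		HCMD_NAME(EXAMPLE_CMD_B),
 *	};
 *
 *	static const struct iwl_hcmd_arr example_groups[] = {
 *		[0] = HCMD_ARR(example_legacy_names),
 *	};
 */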

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs;
 *	if unset, 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
 *	for using the older SCD_QUEUE_CFG, set to the version of
 *	SCD_QUEUE_CONFIG_CMD otherwise.
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;
};
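/*
 * Example (not in the original file): a minimal sketch of an op_mode
 * filling the configuration and handing it to the transport via
 * iwl_trans_configure() (defined later in this file). Queue/FIFO
 * numbers and "example_groups" are placeholders:
 *
 *	struct iwl_trans_config trans_cfg = {
 *		.op_mode = op_mode,
 *		.cmd_queue = 0,
 *		.cmd_fifo = 7,
 *		.rx_buf_size = IWL_AMSDU_4K,
 *		.command_groups = example_groups,
 *		.command_groups_size = ARRAY_SIZE(example_groups),
 *	};
 *
 *	iwl_trans_configure(trans, &trans_cfg);
 */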

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/* maximal number of DRAM MAP entries supported by FW */
#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64

/**
 * struct iwl_pnvm_image - contains info about the parsed pnvm image
 * @chunks: array of pointers to pnvm payloads and their sizes
 * @n_chunks: the number of the pnvm payloads.
 * @version: the version of the loaded PNVM image
 */
struct iwl_pnvm_image {
	struct {
		const void *data;
		u32 len;
	} chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	u32 n_chunks;
	u32 version;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on.
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kicks a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not &start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_alloc: Allocate a new TX queue, may sleep.
 * @txq_free: Free a previously allocated TX queue.
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @sw_reset: trigger software reset of the NIC
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 * @sync_nmi: trigger a firmware NMI and wait for it to complete
 * @load_pnvm: save the pnvm data in DRAM
 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
 *	context info.
 * @load_reduce_power: copy reduce power table to the corresponding DRAM memory
 * @set_reduce_power: set reduce power table addresses in the scratch buffer
 * @interrupts: disable/enable interrupts to transport
 * @imr_dma_data: set up IMR DMA
 * @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs, bool is_flush);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
			 u32 sta_mask, u8 tid,
			 int size, unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask,
						 const struct iwl_dump_sanitize_ops *sanitize_ops,
						 void *sanitize_ctx);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*load_pnvm)(struct iwl_trans *trans,
			 const struct iwl_pnvm_image *pnvm_payloads,
			 const struct iwl_ucode_capabilities *capa);
	void (*set_pnvm)(struct iwl_trans *trans,
			 const struct iwl_ucode_capabilities *capa);
	int (*load_reduce_power)(struct iwl_trans *trans,
				 const struct iwl_pnvm_image *payloads,
				 const struct iwl_ucode_capabilities *capa);
	void (*set_reduce_power)(struct iwl_trans *trans,
				 const struct iwl_ucode_capabilities *capa);

	void (*interrupts)(struct iwl_trans *trans, bool enable);
	int (*imr_dma_data)(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr,
			    u32 byte_cnt);

};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  In system-wide suspend mode, it means that all the
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_dram_regions - DRAM regions container structure
 * @drams: array of several DRAM areas that contains the pnvm and power
 *	reduction table payloads.
 * @n_regions: number of DRAM regions that were allocated
 * @prph_scratch_mem_desc: points to a structure allocated in dram,
 *	designed to show FW where all the payloads are.
 */
struct iwl_dram_regions {
	struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	struct iwl_dram_data prph_scratch_mem_desc;
	u8 n_regions;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_imr_data - imr dram data used during debug process
 * @imr_enable: imr enable status received from fw
 * @imr_size: imr dram size received from fw
 * @sram_addr: sram address from debug tlv
 * @sram_size: sram size from debug tlv
 * @imr2sram_remainbyte: size remaining after each dma transfer
 * @imr_curr_addr: current dst address used during dma transfer
 * @imr_base_addr: imr address received from fw
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};

#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES      32

/**
 * struct iwl_pc_data - program counter details
 * @pc_name: cpu name
 * @pc_address: cpu program counter
 */
struct iwl_pc_data {
	u8  pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];
	u32 pc_address;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: address(es) of TCM error table(s)
 * @rcm_error_event_table: address(es) of RCM error table(s)
 * @error_event_table_tlv_status: bitmap that indicates which error table
 *	pointers were received via TLV. uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @unsupported_region_msk: unsupported regions out of active_regions
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 * @restart_required: indicates debug restart is required
 * @last_tp_resetfw: last handling of reset during debug timepoint
 * @imr_data: IMR debug data allocation
 * @dump_file_name_ext: dump file name extension
 * @dump_file_name_ext_valid: whether the dump file name extension is valid
 * @num_pc: number of program counters for cpu
 * @pc_data: details of the program counter
 * @yoyo_bin_loaded: tells if a yoyo debug file has been loaded
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
	u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
	bool dump_file_name_ext_valid;
	u32 num_pc;
	struct iwl_pc_data *pc_data;
	bool yoyo_bin_loaded;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since the PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @block: queue is blocked
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr:  physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 * @overflow_q: overflow queue for handling frames that didn't fit on HW queue
 * @overflow_tx: need to transmit from overflow
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *  SW entries:           | 0      | ... | 31          |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
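/*
 * Example (not in the original file): with the window layout described
 * above, and n_window being a power of two, a HW index reduces to a SW
 * window index by masking with n_window - 1, e.g. for the command
 * queue (n_window == 32) HW index 97 maps to SW entry 97 & 31 == 1.
 */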

/**
 * struct iwl_trans_txqs - transport tx queues data
 *
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 * @txq: array of TXQ data structures representing the TXQs
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 * @queue_alloc_cmd_ver: queue allocation command version
 * @bc_pool: bytecount DMA allocations pool
 * @bc_tbl_size: bytecount table size
 * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
 *	(and similar usage)
 * @tfd: TFD data
 * @tfd.max_tbs: max number of buffers per TFD
 * @tfd.size: TFD size
 * @tfd.addr_size: TFD/TB address size
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;

	u8 queue_alloc_cmd_ver;
};

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @state: current device state
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_cnv_id: a u32 with the device CNV ID
 * @hw_crf_id: a u32 with the device CRF ID
 * @hw_wfpm_id: a u32 with the device wfpm ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @sku_id: the SKU identifier (for PNVM matching)
 * @pnvm_loaded: indicates PNVM was loaded
 * @hw_rev: the revision data of the HW
 * @hw_rev_step: The mac step of the HW
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
 * @reduce_power_loaded: indicates reduced power section was loaded
 * @failed_to_load_reduce_power_image: set to true if pnvm loading failed
 * @command_groups: pointer to command group name list array
 * @command_groups_size: array size of @command_groups
 * @wide_cmd_header: true when ucode supports wide command header format
 * @wait_command_queue: wait queue for sync commands
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @dev_cmd_pool_name: name for the TX command allocation pool
 * @dbgfs_dir: iwlwifi debugfs base dir for this device
 * @sync_cmd_lockdep_map: lockdep map for checking sync commands
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dbg: additional debug data, see &struct iwl_trans_debug
 * @init_dram: FW initialization DMA data
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @name: the device name
 * @txqs: transport tx queues data.
 * @mbx_addr_0_step: step address data 0
 * @mbx_addr_1_step: step address data 1
 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
 *	only valid for discrete (not integrated) NICs
 * @invalid_tx_cmd: invalid TX command buffer
 * @reduced_cap_sku: reduced capability supported SKU
 * @no_160: device not supporting 160 MHz
 * @step_urm: STEP is in URM, no support for MCS>9 in 320 MHz
 * @trans_specific: data for the specific transport this is allocated for/with
 */
struct iwl_trans {
	bool csme_own;
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_crf_id;
	u32 hw_cnv_id;
	u32 hw_wfpm_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];
	bool reduced_cap_sku;
	u8 no_160:1, step_urm:1;

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 fail_to_parse_pnvm_image:1;
	u8 reduce_power_loaded:1;
	u8 failed_to_load_reduce_power_image:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;
	u32 mbx_addr_0_step;
	u32 mbx_addr_1_step;

	u8 pcie_link_speed;

	struct iwl_dma_ptr invalid_tx_cmd;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return -EOPNOTSUPP;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return -EOPNOTSUPP;

	return trans->ops->d3_resume(trans, status, test, reset);
}
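/*
 * Example (not in the original file): a minimal sketch of the WoWLAN
 * suspend/resume flow built from the wrappers above. The op_mode is
 * assumed to have set trans->system_pm_mode = IWL_PLAT_PM_MODE_D3
 * beforehand, so that SUSPEND/RESUME commands (CMD_SEND_IN_D3) can go
 * through:
 *
 *	enum iwl_d3_status d3_status;
 *	int ret = iwl_trans_d3_suspend(trans, false, false);
 *
 *	(platform sleeps here)
 *
 *	if (!ret)
 *		ret = iwl_trans_d3_resume(trans, &d3_status, false, false);
 *	if (!ret && d3_status != IWL_D3_STATUS_ALIVE)
 *		(firmware was reset; a full restart is needed)
 */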

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask,
				     sanitize_ops, sanitize_ctx);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}
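/*
 * Example (not in the original file): a sketch of the TX command life
 * cycle around iwl_trans_tx(). "fill_tx_cmd" stands in for the
 * op_mode-specific code that fills dev_cmd->hdr and payload:
 *
 *	struct iwl_device_tx_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (!dev_cmd)
 *		return -ENOMEM;
 *	fill_tx_cmd(dev_cmd, skb);
 *	if (iwl_trans_tx(trans, skb, dev_cmd, queue)) {
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 *		return -EIO;
 *	}
 *
 * On success the command is freed later, when the frame is reclaimed.
 */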

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs,
				     bool is_flush)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -EOPNOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    u32 flags, u32 sta_mask, u8 tid,
		    int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -EOPNOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
				     size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -EOPNOTSUPP;

	/* No need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -EOPNOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
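/*
 * Example (not in the original file): the buffer size handed to
 * iwl_trans_read_mem_bytes() must be a whole number of DWORDs;
 * "base_addr" is a placeholder SRAM address:
 *
 *	u32 table[4];
 *
 *	iwl_trans_read_mem_bytes(trans, base_addr, table, sizeof(table));
 *
 * A constant size that is not a multiple of 4 trips the BUILD_BUG_ON.
 */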

static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
					  u32 dst_addr, u64 src_addr,
					  u32 byte_cnt)
{
	if (trans->ops->imr_dma_data)
		return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
	return 0;
}

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (iwl_trans_read_mem(trans, addr, &value, 1))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
				     bool retake_ownership)
{
	if (trans->ops->sw_reset)
		return trans->ops->sw_reset(trans, retake_ownership);
	return 0;
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}
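/*
 * Example (not in the original file): the canonical grab/release
 * pattern for accessing non-HBUS registers while keeping the NIC
 * awake; "reg" is a placeholder register offset, and no sleeping is
 * allowed between the two calls:
 *
 *	u32 val = 0;
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		val = iwl_trans_read_prph(trans, reg);
 *		iwl_trans_release_nic_access(trans);
 *	}
 */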
1574
1575static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
1576{
1577	if (WARN_ON_ONCE(!trans->op_mode))
1578		return;
1579
1580	/* prevent double restarts due to the same erroneous FW */
1581	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
1582		iwl_op_mode_nic_error(trans->op_mode, sync);
1583		trans->state = IWL_TRANS_NO_FW;
1584	}
1585}
1586
1587static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
1588{
1589	return trans->state == IWL_TRANS_FW_ALIVE;
1590}
1591
1592static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
1593{
1594	if (trans->ops->sync_nmi)
1595		trans->ops->sync_nmi(trans);
1596}
1597
1598void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
1599				  u32 sw_err_bit);
1600
1601static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
1602				      const struct iwl_pnvm_image *pnvm_data,
1603				      const struct iwl_ucode_capabilities *capa)
1604{
1605	return trans->ops->load_pnvm(trans, pnvm_data, capa);
1606}
1607
1608static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
1609				      const struct iwl_ucode_capabilities *capa)
1610{
1611	if (trans->ops->set_pnvm)
1612		trans->ops->set_pnvm(trans, capa);
1613}
1614
1615static inline int
1616iwl_trans_load_reduce_power(struct iwl_trans *trans,
1617			    const struct iwl_pnvm_image *payloads,
1618			    const struct iwl_ucode_capabilities *capa)
1619{
1620	return trans->ops->load_reduce_power(trans, payloads, capa);
1621}
1622
1623static inline void
1624iwl_trans_set_reduce_power(struct iwl_trans *trans,
1625			   const struct iwl_ucode_capabilities *capa)
1626{
1627	if (trans->ops->set_reduce_power)
1628		trans->ops->set_reduce_power(trans, capa);
1629}
1630
1631static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
1632{
1633	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
1634		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
1635}
1636
1637static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
1638{
1639	if (trans->ops->interrupts)
1640		trans->ops->interrupts(trans, enable);
1641}
1642
1643/*****************************************************
1644 * transport helper functions
1645 *****************************************************/
1646struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1647			  struct device *dev,
1648			  const struct iwl_trans_ops *ops,
1649			  const struct iwl_cfg_trans_params *cfg_trans);
1650int iwl_trans_init(struct iwl_trans *trans);
1651void iwl_trans_free(struct iwl_trans *trans);
1652
1653static inline bool iwl_trans_is_hw_error_value(u32 val)
1654{
1655	return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
1656}
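/*
 * Editor's sketch: reads from a dead or removed device typically come
 * back as one of the 0xa5a5a5ax / 0x5a5a5a5x patterns, which this
 * helper recognizes:
 *
 *	u32 val = iwl_trans_read32(trans, ofs);
 *
 *	if (iwl_trans_is_hw_error_value(val))
 *		... treat the device as inaccessible ...
 */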
1657
1658/*****************************************************
1659* driver (transport) register/unregister functions
1660******************************************************/
1661int __must_check iwl_pci_register_driver(void);
1662void iwl_pci_unregister_driver(void);
1663void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
1664
1665#endif /* __iwl_trans_h__ */
v4.6
   1/******************************************************************************
   2 *
   3 * This file is provided under a dual BSD/GPLv2 license.  When using or
   4 * redistributing this file, you may do so under either license.
   5 *
   6 * GPL LICENSE SUMMARY
   7 *
   8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
   9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  10 *
  11 * This program is free software; you can redistribute it and/or modify
  12 * it under the terms of version 2 of the GNU General Public License as
  13 * published by the Free Software Foundation.
  14 *
  15 * This program is distributed in the hope that it will be useful, but
  16 * WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  18 * General Public License for more details.
  19 *
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  23 * USA
  24 *
  25 * The full GNU General Public License is included in this distribution
  26 * in the file called COPYING.
  27 *
  28 * Contact Information:
  29 *  Intel Linux Wireless <linuxwifi@intel.com>
  30 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  31 *
  32 * BSD LICENSE
  33 *
  34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  36 * All rights reserved.
  37 *
  38 * Redistribution and use in source and binary forms, with or without
  39 * modification, are permitted provided that the following conditions
  40 * are met:
  41 *
  42 *  * Redistributions of source code must retain the above copyright
  43 *    notice, this list of conditions and the following disclaimer.
  44 *  * Redistributions in binary form must reproduce the above copyright
  45 *    notice, this list of conditions and the following disclaimer in
  46 *    the documentation and/or other materials provided with the
  47 *    distribution.
  48 *  * Neither the name Intel Corporation nor the names of its
  49 *    contributors may be used to endorse or promote products derived
  50 *    from this software without specific prior written permission.
  51 *
  52 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  53 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  54 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  55 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  56 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  57 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  58 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  62 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  63 *
  64 *****************************************************************************/
  65#ifndef __iwl_trans_h__
  66#define __iwl_trans_h__
  67
  68#include <linux/ieee80211.h>
  69#include <linux/mm.h> /* for page_address */
  70#include <linux/lockdep.h>
  71#include <linux/kernel.h>
  72
  73#include "iwl-debug.h"
  74#include "iwl-config.h"
  75#include "iwl-fw.h"
  76#include "iwl-op-mode.h"
  77
  78/**
  79 * DOC: Transport layer - what is it ?
  80 *
  81 * The transport layer is the layer that deals with the HW directly. It provides
  82 * an abstraction of the underlying HW to the upper layer. The transport layer
  83 * doesn't provide any policy, algorithm or anything of this kind, but only
  84 * mechanisms to make the HW do something. It is not completely stateless but
  85 * close to it.
  86 * We will have an implementation for each different supported bus.
  87 */
  88
  89/**
  90 * DOC: Life cycle of the transport layer
  91 *
  92 * The transport layer has a very precise life cycle.
  93 *
  94 *	1) A helper function is called during the module initialization and
  95 *	   registers the bus driver's ops with the transport's alloc function.
  96 *	2) The bus's probe calls the transport layer's allocation function.
  97 *	   Of course, this function is bus specific.
  98 *	3) The allocation function will spawn the upper layer which will
  99 *	   register mac80211.
 100 *
 101 *	4) At some point (i.e. mac80211's start call), the op_mode will call
 102 *	   the following sequence:
 103 *	   start_hw
 104 *	   start_fw
 105 *
 106 *	5) Then when finished (or reset):
 107 *	   stop_device
 108 *
 109 *	6) Eventually, the free function will be called.
 110 */
 111
 112/**
 113 * DOC: Host command section
 114 *
 115 * A host command is a command issued by the upper layer to the fw. There are
 116 * several versions of fw that have several APIs. The transport layer is
 117 * completely agnostic to these differences.
 118 * The transport does provide helper functionality (e.g. the SYNC / ASYNC modes).
 119 */
 120#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
 121#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
 122#define SEQ_TO_INDEX(s)	((s) & 0xff)
 123#define INDEX_TO_SEQ(i)	((i) & 0xff)
 124#define SEQ_RX_FRAME	cpu_to_le16(0x8000)
 125
 126/*
 127 * These functions retrieve specific information from the id field in
 128 * the iwl_host_cmd struct, which contains the command id, the group id
 129 * and the version of the command, and vice versa: iwl_cmd_id() composes
 130 * an id from those parts.
 131 */
 132static inline u8 iwl_cmd_opcode(u32 cmdid)
 133{
 134	return cmdid & 0xFF;
 135}
 136
 137static inline u8 iwl_cmd_groupid(u32 cmdid)
 138{
 139	return ((cmdid & 0xFF00) >> 8);
 140}
 141
 142static inline u8 iwl_cmd_version(u32 cmdid)
 143{
 144	return ((cmdid & 0xFF0000) >> 16);
 145}
 146
 147static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
 148{
 149	return opcode + (groupid << 8) + (version << 16);
 150}
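/*
 * Editor's example: composing an id and taking it apart again with the
 * helpers above (the opcode/group/version values are arbitrary):
 *
 *	u32 id = iwl_cmd_id(0x01, 0x02, 0);
 *
 *	iwl_cmd_opcode(id);	== 0x01
 *	iwl_cmd_groupid(id);	== 0x02
 *	iwl_cmd_version(id);	== 0
 */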
 151
 152/* make u16 wide id out of u8 group and opcode */
 153#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
 154
 155/* due to the conversion, this group is special; new groups
 156 * should be defined in the appropriate fw-api header files
 157 */
 158#define IWL_ALWAYS_LONG_GROUP	1
 159
 160/**
 161 * struct iwl_cmd_header
 162 *
 163 * This header format appears in the beginning of each command sent from the
 164 * driver, and each response/notification received from uCode.
 165 */
 166struct iwl_cmd_header {
 167	u8 cmd;		/* Command ID:  REPLY_RXON, etc. */
 168	u8 group_id;
 169	/*
 170	 * The driver sets up the sequence number to values of its choosing.
 171	 * uCode does not use this value, but passes it back to the driver
 172	 * when sending the response to each driver-originated command, so
 173	 * the driver can match the response to the command.  Since the values
 174	 * don't get used by uCode, the driver may set up an arbitrary format.
 175	 *
 176	 * There is one exception:  uCode sets bit 15 when it originates
 177	 * the response/notification, i.e. when the response/notification
 178	 * is not a direct response to a command sent by the driver.  For
 179	 * example, uCode issues REPLY_RX when it sends a received frame
 180	 * to the driver; it is not a direct response to any driver command.
 181	 *
 182	 * The Linux driver uses the following format:
 183	 *
 184	 *  0:7		tfd index - position within TX queue
 185	 *  8:12	TX queue id
 186	 *  13:14	reserved
 187	 *  15		unsolicited RX or uCode-originated notification
 188	 */
 189	__le16 sequence;
 190} __packed;
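/*
 * Editor's sketch of decoding the driver's sequence format with the
 * SEQ_* helpers above, given a response packet (pkt is assumed to be a
 * struct iwl_rx_packet *, defined further down in this header):
 *
 *	u16 seq = le16_to_cpu(pkt->hdr.sequence);
 *	int txq_id = SEQ_TO_QUEUE(seq);
 *	int index = SEQ_TO_INDEX(seq);
 */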
 191
 192/**
 193 * struct iwl_cmd_header_wide
 194 *
 195 * This header format appears in the beginning of each command sent from the
 196 * driver, and each response/notification received from uCode.
 197 * This is the wide version, which contains more information about the
 198 * command, such as its length, version and type.
 199 */
 200struct iwl_cmd_header_wide {
 201	u8 cmd;
 202	u8 group_id;
 203	__le16 sequence;
 204	__le16 length;
 205	u8 reserved;
 206	u8 version;
 207} __packed;
 208
 209#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
 210#define FH_RSCSR_FRAME_INVALID		0x55550000
 211#define FH_RSCSR_FRAME_ALIGN		0x40
 212
 213struct iwl_rx_packet {
 214	/*
 215	 * The first 4 bytes of the RX frame header contain both the RX frame
 216	 * size and some flags.
 217	 * Bit fields:
 218	 * 31:    flag flush RB request
 219	 * 30:    flag ignore TC (terminal counter) request
 220	 * 29:    flag fast IRQ request
 221	 * 28-14: Reserved
 222	 * 13-00: RX frame size
 223	 */
 224	__le32 len_n_flags;
 225	struct iwl_cmd_header hdr;
 226	u8 data[];
 227} __packed;
 228
 229static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
 230{
 231	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
 232}
 233
 234static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
 235{
 236	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
 237}
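/*
 * Editor's sketch: an RX handler is typically given a struct
 * iwl_rx_cmd_buffer * (declared below) and gets at the packet and its
 * payload like this:
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *	u32 payload_len = iwl_rx_packet_payload_len(pkt);
 */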
 238
 239/**
 240 * enum CMD_MODE - how to send the host commands ?
 241 *
 242 * @CMD_ASYNC: Return right away and don't wait for the response
 243 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 244 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 245 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 246 *	command queue, but after other high priority commands. Valid only
 247 *	with CMD_ASYNC.
 248 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 249 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 250 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 251 *	(i.e. mark it as non-idle).
 252 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 253 *	called after this command completes. Valid only with CMD_ASYNC.
 254 * @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
 255 *	check that we leave enough room for the TBs bitmap which needs 20 bits.
 256 */
 257enum CMD_MODE {
 258	CMD_ASYNC		= BIT(0),
 259	CMD_WANT_SKB		= BIT(1),
 260	CMD_SEND_IN_RFKILL	= BIT(2),
 261	CMD_HIGH_PRIO		= BIT(3),
 262	CMD_SEND_IN_IDLE	= BIT(4),
 263	CMD_MAKE_TRANS_IDLE	= BIT(5),
 264	CMD_WAKE_UP_TRANS	= BIT(6),
 265	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
 266
 267	CMD_TB_BITMAP_POS	= 11,
 268};
 269
 270#define DEF_CMD_PAYLOAD_SIZE 320
 271
 272/**
 273 * struct iwl_device_cmd
 274 *
 275 * For allocation of the command and tx queues, this establishes the overall
 276 * size of the largest command we send to uCode, except for commands that
 277 * aren't fully copied and use other TFD space.
 278 */
 279struct iwl_device_cmd {
 280	union {
 281		struct {
 282			struct iwl_cmd_header hdr;	/* uCode API */
 283			u8 payload[DEF_CMD_PAYLOAD_SIZE];
 284		};
 285		struct {
 286			struct iwl_cmd_header_wide hdr_wide;
 287			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
 288					sizeof(struct iwl_cmd_header_wide) +
 289					sizeof(struct iwl_cmd_header)];
 290		};
 291	};
 292} __packed;
 293
 294#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
 295
 296/*
 297 * number of transfer buffers (fragments) per transmit frame descriptor;
 298 * this is just the driver's idea, the hardware supports 20
 299 */
 300#define IWL_MAX_CMD_TBS_PER_TFD	2
 301
 302/**
 303 * struct iwl_hcmd_dataflag - flag for each one of the chunks of the command
 304 *
 305 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 306 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 307 *	rather copies it to a previously allocated DMA buffer. This flag tells
 308 *	the transport layer not to copy the command, but to map the existing
 309 *	buffer (that is passed in) instead. This saves the memcpy and allows
 310 *	commands that are bigger than the fixed buffer to be submitted.
 311 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 312 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 313 *	chunk internally and free it again after the command completes. This
 314 *	can (currently) be used only once per command.
 315 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 316 */
 317enum iwl_hcmd_dataflag {
 318	IWL_HCMD_DFL_NOCOPY	= BIT(0),
 319	IWL_HCMD_DFL_DUP	= BIT(1),
 320};
 321
 322/**
 323 * struct iwl_host_cmd - Host command to the uCode
 324 *
 325 * @data: array of chunks that composes the data of the host command
 326 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 327 * @_rx_page_order: (internally used to free response packet)
 328 * @_rx_page_addr: (internally used to free response packet)
 329 * @flags: can be CMD_*
 330 * @len: array of the lengths of the chunks in data
 331 * @dataflags: IWL_HCMD_DFL_*
 332 * @id: command id of the host command, for wide commands encoding the
 333 *	version and group as well
 334 */
 335struct iwl_host_cmd {
 336	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
 337	struct iwl_rx_packet *resp_pkt;
 338	unsigned long _rx_page_addr;
 339	u32 _rx_page_order;
 340
 341	u32 flags;
 342	u32 id;
 343	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
 344	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
 345};
 346
 347static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
 348{
 349	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
 350}
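/*
 * Editor's example of a synchronous host command that wants the
 * response buffer; REPLY_EXAMPLE_CMD and cmd_data are placeholders,
 * and iwl_trans_send_cmd() is declared further down in this header:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = REPLY_EXAMPLE_CMD,
 *		.flags = CMD_WANT_SKB,
 *		.data = { &cmd_data, },
 *		.len = { sizeof(cmd_data), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		... inspect hcmd.resp_pkt ...
 *		iwl_free_resp(&hcmd);
 *	}
 */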
 351
 352struct iwl_rx_cmd_buffer {
 353	struct page *_page;
 354	int _offset;
 355	bool _page_stolen;
 356	u32 _rx_page_order;
 357	unsigned int truesize;
 358};
 359
 360static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
 361{
 362	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
 363}
 364
 365static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
 366{
 367	return r->_offset;
 368}
 369
 370static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
 371{
 372	r->_page_stolen = true;
 373	get_page(r->_page);
 374	return r->_page;
 375}
 376
 377static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
 378{
 379	__free_pages(r->_page, r->_rx_page_order);
 380}
 381
 382#define MAX_NO_RECLAIM_CMDS	6
 383
 384/*
 385 * The first entry in driver_data array in ieee80211_tx_info
 386 * that can be used by the transport.
 387 */
 388#define IWL_TRANS_FIRST_DRIVER_DATA 2
 389#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
 390
 391/*
 392 * Maximum number of HW queues the transport layer
 393 * currently supports
 394 */
 395#define IWL_MAX_HW_QUEUES		32
 396#define IWL_MAX_TID_COUNT	8
 397#define IWL_FRAME_LIMIT	64
 398#define IWL_MAX_RX_HW_QUEUES	16
 399
 400/**
 401 * enum iwl_d3_status - WoWLAN image/device status
 402 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 403 * @IWL_D3_STATUS_RESET: device was reset while suspended
 404 */
 405enum iwl_d3_status {
 406	IWL_D3_STATUS_ALIVE,
 407	IWL_D3_STATUS_RESET,
 408};
 409
 410/**
 411 * enum iwl_trans_status: transport status flags
 412 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 413 * @STATUS_DEVICE_ENABLED: APM is enabled
 414 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 415 * @STATUS_INT_ENABLED: interrupts are enabled
 416 * @STATUS_RFKILL: the HW RFkill switch is in KILL position
 417 * @STATUS_FW_ERROR: the fw is in error state
 418 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 419 *	are sent
 420 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 421 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 422 */
 423enum iwl_trans_status {
 424	STATUS_SYNC_HCMD_ACTIVE,
 425	STATUS_DEVICE_ENABLED,
 426	STATUS_TPOWER_PMI,
 427	STATUS_INT_ENABLED,
 428	STATUS_RFKILL,
 429	STATUS_FW_ERROR,
 430	STATUS_TRANS_GOING_IDLE,
 431	STATUS_TRANS_IDLE,
 432	STATUS_TRANS_DEAD,
 433};
 434
 435static inline int
 436iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
 437{
 438	switch (rb_size) {
 439	case IWL_AMSDU_4K:
 440		return get_order(4 * 1024);
 441	case IWL_AMSDU_8K:
 442		return get_order(8 * 1024);
 443	case IWL_AMSDU_12K:
 444		return get_order(12 * 1024);
 445	default:
 446		WARN_ON(1);
 447		return -1;
 448	}
 449}
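/*
 * Editor's sketch: the returned order feeds straight into the page
 * allocator, e.g. for 8k A-MSDU buffers on a 4k-page system the order
 * is 1 (two contiguous pages):
 *
 *	int order = iwl_trans_get_rb_size_order(IWL_AMSDU_8K);
 *	struct page *page = alloc_pages(GFP_KERNEL, order);
 */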
 450
 451struct iwl_hcmd_names {
 452	u8 cmd_id;
 453	const char *const cmd_name;
 454};
 455
 456#define HCMD_NAME(x)	\
 457	{ .cmd_id = x, .cmd_name = #x }
 458
 459struct iwl_hcmd_arr {
 460	const struct iwl_hcmd_names *arr;
 461	int size;
 462};
 463
 464#define HCMD_ARR(x)	\
 465	{ .arr = x, .size = ARRAY_SIZE(x) }
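/*
 * Editor's sketch of how an op_mode builds the (debug-only) command
 * name tables with these macros; the command and array names here are
 * placeholders:
 *
 *	static const struct iwl_hcmd_names iwl_example_names[] = {
 *		HCMD_NAME(REPLY_EXAMPLE_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr iwl_example_groups[] = {
 *		[0] = HCMD_ARR(iwl_example_names),
 *	};
 *
 * Each array must be sorted by cmd_id so that
 * iwl_cmd_groups_verify_sorted(), declared below, passes.
 */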
 466
 467/**
 468 * struct iwl_trans_config - transport configuration
 469 *
 470 * @op_mode: pointer to the upper layer.
 471 * @cmd_queue: the index of the command queue.
 472 *	Must be set before start_fw.
 473 * @cmd_fifo: the fifo for host commands
 474 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 475 * @no_reclaim_cmds: Some devices erroneously don't set the
 476 *	SEQ_RX_FRAME bit on some notifications, this is the
 477 *	list of such notifications to filter. Max length is
 478 *	%MAX_NO_RECLAIM_CMDS.
 479 * @n_no_reclaim_cmds: # of commands in list
 480 * @rx_buf_size: RX buffer size needed for A-MSDUs
 481 *	if unset 4k will be the RX buffer size
 482 * @bc_table_dword: set to true if the BC table expects the byte count to be
 483 *	in DWORD (as opposed to bytes)
 484 * @scd_set_active: should the transport configure the SCD for HCMD queue
 485 * @wide_cmd_header: firmware supports wide host command header
 486 * @sw_csum_tx: transport should compute the TCP checksum
 487 * @command_groups: array of command groups, each member is an array of the
 488 *	commands in the group; for debugging only
 489 * @command_groups_size: number of command groups, to avoid illegal access
 490 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
 491 *	we get the ALIVE from the uCode
 492 */
 493struct iwl_trans_config {
 494	struct iwl_op_mode *op_mode;
 495
 496	u8 cmd_queue;
 497	u8 cmd_fifo;
 498	unsigned int cmd_q_wdg_timeout;
 499	const u8 *no_reclaim_cmds;
 500	unsigned int n_no_reclaim_cmds;
 501
 502	enum iwl_amsdu_size rx_buf_size;
 503	bool bc_table_dword;
 504	bool scd_set_active;
 505	bool wide_cmd_header;
 506	bool sw_csum_tx;
 507	const struct iwl_hcmd_arr *command_groups;
 508	int command_groups_size;
 509
 510	u32 sdio_adma_addr;
 511};
 512
 513struct iwl_trans_dump_data {
 514	u32 len;
 515	u8 data[];
 516};
 517
 518struct iwl_trans;
 519
 520struct iwl_trans_txq_scd_cfg {
 521	u8 fifo;
 522	s8 sta_id;
 523	u8 tid;
 524	bool aggregate;
 525	int frame_limit;
 526};
 527
 528/**
 529 * struct iwl_trans_ops - transport specific operations
 530 *
 531 * All the handlers MUST be implemented
 532 *
 533 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
 534 *	out of a low power state. From that point on, the HW can send
 535 *	interrupts. May sleep.
 536 * @op_mode_leave: Turn off the HW RF kill indication if on
 537 *	May sleep
 538 * @start_fw: allocates and inits all the resources for the transport
 539 *	layer. Also kick a fw image.
 540 *	May sleep
 541 * @fw_alive: called when the fw sends alive notification. If the fw provides
 542 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 543 *	May sleep
 544 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 545 *	the HW. If low_power is true, the NIC will be put in low power state.
 546 *	From that point on, the HW will be stopped but will still issue an
 547 *	interrupt if the HW RF kill switch is triggered.
 548 *	This callback must do the right thing and not crash even if %start_hw()
 549 *	was called but not %start_fw(). May sleep.
 550 * @d3_suspend: put the device into the correct mode for WoWLAN during
 551 *	suspend. This is optional, if not implemented WoWLAN will not be
 552 *	supported. This callback may sleep.
 553 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 554 *	talk to the WoWLAN image to get its status. This is optional, if not
 555 *	implemented WoWLAN will not be supported. This callback may sleep.
 556 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 557 *	If RFkill is asserted in the middle of a SYNC host command, it must
 558 *	return -ERFKILL straight away.
 559 *	May sleep only if CMD_ASYNC is not set
 560 * @tx: send an skb. The transport relies on the op_mode to zero
 561 *	the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 562 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 563 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 564 *	header if it is IPv4.
 565 *	Must be atomic
 566 * @reclaim: free packets up to ssn; the freed packets are returned
 567 *	in the skbs list. Must be atomic
 568 * @txq_enable: setup a queue. To setup an AC queue, use the
 569 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 570 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 571 *	configuration may be %NULL, in which case the hardware will not be
 572 *	configured. May sleep.
 573 * @txq_disable: de-configure a Tx queue to send AMPDUs
 574 *	Must be atomic
 575 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep.
 576 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 577 *	queue is set to awake. Must be atomic.
 578 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 579 *	that the transport needs to refcount the calls since this function
 580 *	will be called several times with block = true, and then the queues
 581 *	need to be unblocked only after the same number of calls with
 582 *	block = false.
 583 * @write8: write a u8 to a register at offset ofs from the BAR
 584 * @write32: write a u32 to a register at offset ofs from the BAR
 585 * @read32: read a u32 register at offset ofs from the BAR
 586 * @read_prph: read a DWORD from a periphery register
 587 * @write_prph: write a DWORD to a periphery register
 588 * @read_mem: read device's SRAM in DWORD
 589 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 590 *	will be zeroed.
 591 * @configure: configure parameters required by the transport layer from
 592 *	the op_mode. May be called several times before start_fw, can't be
 593 *	called after that.
 594 * @set_pmi: set the power pmi state
 595 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 596 *	Sleeping is not allowed between grab_nic_access and
 597 *	release_nic_access.
 598 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 599 *	must be the same one that was passed to grab_nic_access before.
 600 * @set_bits_mask: set SRAM register according to value and mask.
 601 * @ref: grab a reference to the transport/FW layers, disallowing
 602 *	certain low power states
 603 * @unref: release a reference previously taken with @ref. Note that
 604 *	initially the reference count is 1, making an initial @unref
 605 *	necessary to allow low power states.
 606 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 607 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 608 *	Note that the transport must fill in the proper file headers.
 609 */
 610struct iwl_trans_ops {
 611
 612	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
 613	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
 614	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
 615			bool run_in_rfkill);
 616	int (*update_sf)(struct iwl_trans *trans,
 617			 struct iwl_sf_region *st_fwrd_space);
 618	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
 619	void (*stop_device)(struct iwl_trans *trans, bool low_power);
 620
 621	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
 622	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
 623			 bool test, bool reset);
 624
 625	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 626
 627	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
 628		  struct iwl_device_cmd *dev_cmd, int queue);
 629	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
 630			struct sk_buff_head *skbs);
 631
 632	void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
 633			   const struct iwl_trans_txq_scd_cfg *cfg,
 634			   unsigned int queue_wdg_timeout);
 635	void (*txq_disable)(struct iwl_trans *trans, int queue,
 636			    bool configure_scd);
 637
 638	int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
 639	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
 640				 bool freeze);
 641	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
 642
 643	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
 644	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
 645	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
 646	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
 647	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
 648	int (*read_mem)(struct iwl_trans *trans, u32 addr,
 649			void *buf, int dwords);
 650	int (*write_mem)(struct iwl_trans *trans, u32 addr,
 651			 const void *buf, int dwords);
 652	void (*configure)(struct iwl_trans *trans,
 653			  const struct iwl_trans_config *trans_cfg);
 654	void (*set_pmi)(struct iwl_trans *trans, bool state);
 655	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
 656	void (*release_nic_access)(struct iwl_trans *trans,
 657				   unsigned long *flags);
 658	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
 659			      u32 value);
 660	void (*ref)(struct iwl_trans *trans);
 661	void (*unref)(struct iwl_trans *trans);
 662	int  (*suspend)(struct iwl_trans *trans);
 663	void (*resume)(struct iwl_trans *trans);
 664
 665	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
 666						 const struct iwl_fw_dbg_trigger_tlv
 667						 *trigger);
 668};
 669
 670/**
 671 * enum iwl_trans_state - state of the transport layer
 672 *
 673 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 674 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 675 */
 676enum iwl_trans_state {
 677	IWL_TRANS_NO_FW = 0,
 678	IWL_TRANS_FW_ALIVE	= 1,
 679};
 680
 681/**
 682 * DOC: Platform power management
 683 *
 684 * There are two types of platform power management: system-wide
 685 * (WoWLAN) and runtime.
 686 *
 687 * In system-wide power management the entire platform goes into a low
 688 * power state (e.g. idle or suspend to RAM) at the same time and the
 689 * device is configured as a wakeup source for the entire platform.
 690 * This is usually triggered by userspace activity (e.g. the user
 691 * presses the suspend button or a power management daemon decides to
 692 * put the platform in low power mode).  The device's behavior in this
 693 * mode is dictated by the wake-on-WLAN configuration.
 694 *
 695 * In runtime power management, only the devices which are themselves
 696 * idle enter a low power state.  This is done at runtime, which means
 697 * that the entire system is still running normally.  This mode is
 698 * usually triggered automatically by the device driver and requires
 699 * the ability to enter and exit the low power modes in a very short
 700 * time, so there is not much impact on usability.
 701 *
 702 * The terms used for the device's behavior are as follows:
 703 *
 704 *	- D0: the device is fully powered and the host is awake;
 705 *	- D3: the device is in low power mode and only reacts to
 706 *		specific events (e.g. magic-packet received or scan
 707 *		results found);
 708 *	- D0I3: the device is in low power mode and reacts to any
 709 *		activity (e.g. RX);
 710 *
 711 * These terms reflect the power modes in the firmware and are not to
 712 * be confused with the physical device power state.  The NIC can be
 713 * in D0I3 mode even if, for instance, the PCI device is in D3 state.
 714 */
 715
 716/**
 717 * enum iwl_plat_pm_mode - platform power management mode
 718 *
 719 * This enumeration describes the device's platform power management
 720 * behavior when in idle mode (i.e. runtime power management) or when
 721 * in system-wide suspend (i.e WoWLAN).
 722 *
 723 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 724 *	device.  At runtime, this means that nothing happens and the
 725 *	device always remains active.  In system-wide suspend mode,
 726 *	it means that all connections will be closed automatically
 727 *	by mac80211 before the platform is suspended.
 728 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 729 *	For runtime power management, this mode is not officially
 730 *	supported.
 731 * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
 732 */
 733enum iwl_plat_pm_mode {
 734	IWL_PLAT_PM_MODE_DISABLED,
 735	IWL_PLAT_PM_MODE_D3,
 736	IWL_PLAT_PM_MODE_D0I3,
 737};
 738
 739/* Max time to wait for trans to become idle/non-idle on d0i3
 740 * enter/exit (in msecs).
 741 */
 742#define IWL_TRANS_IDLE_TIMEOUT 2000
 743
 744/**
 745 * struct iwl_trans - transport common data
 746 *
 747 * @ops: pointer to iwl_trans_ops
 748 * @op_mode: pointer to the op_mode
 749 * @cfg: pointer to the configuration
 750 * @status: a bit-mask of transport status flags
 751 * @dev: pointer to struct device * that represents the device
 752 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 753 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 754 * @hw_id: a u32 with the ID of the device / sub-device.
 755 *	Set during transport allocation.
 756 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 757 * @pm_support: set to true in start_hw if link pm is supported
 758 * @ltr_enabled: set to true if the LTR is enabled
 759 * @num_rx_queues: number of RX queues allocated by the transport;
 760 *	the transport must set this before calling iwl_drv_start()
 761 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 762 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 763 * @dev_cmd_headroom: room needed for the transport's private use before the
 764 *	device_cmd for Tx - for internal use only
 765 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 766 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 767 *	starting the firmware, used for tracing
 768 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 769 *	start of the 802.11 header in the @rx_mpdu_cmd
 770 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 771 * @dbg_dest_tlv: points to the destination TLV for debug
 772 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 773 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 774 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 775 * @paging_req_addr: The location where the FW will upload / download the pages
 776 *	from. The address is set by the opmode
 777 * @paging_db: Pointer to the opmode paging database; the pointer is set by
 778 *	the opmode.
 779 * @paging_download_buf: Buffer used for copying all of the pages before
 780 *	downloading them to the FW. The buffer is allocated in the opmode
 781 * @system_pm_mode: the system-wide power management mode in use.
 782 *	This mode is set dynamically, depending on the WoWLAN values
 783 *	configured from the userspace at runtime.
 784 * @runtime_pm_mode: the runtime power management mode in use.  This
 785 *	mode is set during the initialization phase and is not
 786 *	supposed to change during runtime.
 787 */
 788struct iwl_trans {
 789	const struct iwl_trans_ops *ops;
 790	struct iwl_op_mode *op_mode;
 791	const struct iwl_cfg *cfg;
 792	enum iwl_trans_state state;
 793	unsigned long status;
 794
 795	struct device *dev;
 796	u32 max_skb_frags;
 797	u32 hw_rev;
 798	u32 hw_id;
 799	char hw_id_str[52];
 800
 801	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 802
 803	bool pm_support;
 804	bool ltr_enabled;
 805
 806	const struct iwl_hcmd_arr *command_groups;
 807	int command_groups_size;
 808
 809	u8 num_rx_queues;
 810
 811	/* The following fields are internal only */
 812	struct kmem_cache *dev_cmd_pool;
 813	size_t dev_cmd_headroom;
 814	char dev_cmd_pool_name[50];
 815
 816	struct dentry *dbgfs_dir;
 817
 818#ifdef CONFIG_LOCKDEP
 819	struct lockdep_map sync_cmd_lockdep_map;
 820#endif
 821
 822	u64 dflt_pwr_limit;
 823
 824	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
 825	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 826	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
 827	u8 dbg_dest_reg_num;
 828
 829	/*
 830	 * Paging parameters - All of the parameters should be set by the
 831	 * opmode when paging is enabled
 832	 */
 833	u32 paging_req_addr;
 834	struct iwl_fw_paging *paging_db;
 835	void *paging_download_buf;
 836
 837	enum iwl_plat_pm_mode system_pm_mode;
 838	enum iwl_plat_pm_mode runtime_pm_mode;
 839	bool suspending;
 840
 841	/* pointer to trans specific struct */
 842	/* Ensure that this pointer will always be aligned to sizeof(void *) */
 843	char trans_specific[0] __aligned(sizeof(void *));
 844};
 845
 846const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
 847int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
 848
 849static inline void iwl_trans_configure(struct iwl_trans *trans,
 850				       const struct iwl_trans_config *trans_cfg)
 851{
 852	trans->op_mode = trans_cfg->op_mode;
 853
 854	trans->ops->configure(trans, trans_cfg);
 855	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
 856}
 857
 858static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
 859{
 860	might_sleep();
 861
 862	return trans->ops->start_hw(trans, low_power);
 863}
 864
 865static inline int iwl_trans_start_hw(struct iwl_trans *trans)
 866{
 867	return trans->ops->start_hw(trans, true);
 868}
 869
 870static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
 871{
 872	might_sleep();
 873
 874	if (trans->ops->op_mode_leave)
 875		trans->ops->op_mode_leave(trans);
 876
 877	trans->op_mode = NULL;
 878
 879	trans->state = IWL_TRANS_NO_FW;
 880}
 881
 882static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 883{
 884	might_sleep();
 885
 886	trans->state = IWL_TRANS_FW_ALIVE;
 887
 888	trans->ops->fw_alive(trans, scd_addr);
 889}
 890
 891static inline int iwl_trans_start_fw(struct iwl_trans *trans,
 892				     const struct fw_img *fw,
 893				     bool run_in_rfkill)
 894{
 895	might_sleep();
 896
 897	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
 898
 899	clear_bit(STATUS_FW_ERROR, &trans->status);
 900	return trans->ops->start_fw(trans, fw, run_in_rfkill);
 901}
 902
 903static inline int iwl_trans_update_sf(struct iwl_trans *trans,
 904				      struct iwl_sf_region *st_fwrd_space)
 905{
 906	might_sleep();
 907
 908	if (trans->ops->update_sf)
 909		return trans->ops->update_sf(trans, st_fwrd_space);
 910
 911	return 0;
 912}
 913
 914static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
 915					  bool low_power)
 916{
 917	might_sleep();
 918
 919	trans->ops->stop_device(trans, low_power);
 920
 921	trans->state = IWL_TRANS_NO_FW;
 922}
 923
 924static inline void iwl_trans_stop_device(struct iwl_trans *trans)
 925{
 926	_iwl_trans_stop_device(trans, true);
 927}
 928
 929static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
 930					bool reset)
 931{
 932	might_sleep();
 933	if (trans->ops->d3_suspend)
 934		trans->ops->d3_suspend(trans, test, reset);
 935}
 936
 937static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
 938				      enum iwl_d3_status *status,
 939				      bool test, bool reset)
 940{
 941	might_sleep();
 942	if (!trans->ops->d3_resume)
 943		return 0;
 944
 945	return trans->ops->d3_resume(trans, status, test, reset);
 946}
 947
 948static inline void iwl_trans_ref(struct iwl_trans *trans)
 949{
 950	if (trans->ops->ref)
 951		trans->ops->ref(trans);
 952}
 953
 954static inline void iwl_trans_unref(struct iwl_trans *trans)
 955{
 956	if (trans->ops->unref)
 957		trans->ops->unref(trans);
 958}
 959
 960static inline int iwl_trans_suspend(struct iwl_trans *trans)
 961{
 962	if (!trans->ops->suspend)
 963		return 0;
 964
 965	return trans->ops->suspend(trans);
 966}
 967
 968static inline void iwl_trans_resume(struct iwl_trans *trans)
 969{
 970	if (trans->ops->resume)
 971		trans->ops->resume(trans);
 972}
 973
 974static inline struct iwl_trans_dump_data *
 975iwl_trans_dump_data(struct iwl_trans *trans,
 976		    const struct iwl_fw_dbg_trigger_tlv *trigger)
 977{
 978	if (!trans->ops->dump_data)
 979		return NULL;
 980	return trans->ops->dump_data(trans, trigger);
 981}
 982
 983static inline struct iwl_device_cmd *
 984iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
 985{
 986	u8 *dev_cmd_ptr = kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
 987
 988	if (unlikely(dev_cmd_ptr == NULL))
 989		return NULL;
 990
 991	return (struct iwl_device_cmd *)
 992			(dev_cmd_ptr + trans->dev_cmd_headroom);
 993}
 994
 995int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 996
 997static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
 998					 struct iwl_device_cmd *dev_cmd)
 999{
1000	u8 *dev_cmd_ptr = (u8 *)dev_cmd - trans->dev_cmd_headroom;
1001
1002	kmem_cache_free(trans->dev_cmd_pool, dev_cmd_ptr);
1003}
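/*
 * Editor's sketch: Tx commands come from the per-transport cache and
 * must be returned to it on all paths:
 *
 *	struct iwl_device_cmd *dev_cmd = iwl_trans_alloc_tx_cmd(trans);
 *
 *	if (dev_cmd) {
 *		... fill dev_cmd->hdr and payload, hand it to
 *		    iwl_trans_tx(); on error paths call ...
 *		iwl_trans_free_tx_cmd(trans, dev_cmd);
 *	}
 */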
1004
1005static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
1006			       struct iwl_device_cmd *dev_cmd, int queue)
1007{
1008	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
1009		return -EIO;
1010
1011	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1012		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1013		return -EIO;
1014	}
1015
1016	return trans->ops->tx(trans, skb, dev_cmd, queue);
1017}
1018
1019static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
1020				     int ssn, struct sk_buff_head *skbs)
1021{
1022	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1023		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1024		return;
1025	}
1026
1027	trans->ops->reclaim(trans, queue, ssn, skbs);
1028}
1029
1030static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
1031					 bool configure_scd)
1032{
1033	trans->ops->txq_disable(trans, queue, configure_scd);
1034}
1035
1036static inline void
1037iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
1038			 const struct iwl_trans_txq_scd_cfg *cfg,
1039			 unsigned int queue_wdg_timeout)
1040{
1041	might_sleep();
1042
1043	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1044		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1045		return;
1046	}
1047
1048	trans->ops->txq_enable(trans, queue, ssn, cfg, queue_wdg_timeout);
1049}
1050
1051static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
1052					int fifo, int sta_id, int tid,
1053					int frame_limit, u16 ssn,
1054					unsigned int queue_wdg_timeout)
1055{
1056	struct iwl_trans_txq_scd_cfg cfg = {
1057		.fifo = fifo,
1058		.sta_id = sta_id,
1059		.tid = tid,
1060		.frame_limit = frame_limit,
1061		.aggregate = sta_id >= 0,
1062	};
1063
1064	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
1065}
1066
1067static inline
1068void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
1069			     unsigned int queue_wdg_timeout)
1070{
1071	struct iwl_trans_txq_scd_cfg cfg = {
1072		.fifo = fifo,
1073		.sta_id = -1,
1074		.tid = IWL_MAX_TID_COUNT,
1075		.frame_limit = IWL_FRAME_LIMIT,
1076		.aggregate = false,
1077	};
1078
1079	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
1080}
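/*
 * Editor's sketch: an op_mode enables an aggregation queue with the
 * full wrapper and a plain AC queue with the shorter one (queue, fifo
 * and timeout values are placeholders from the op_mode's FW API):
 *
 *	iwl_trans_txq_enable(trans, txq_id, fifo, sta_id, tid,
 *			     IWL_FRAME_LIMIT, ssn, wdg_timeout);
 *	iwl_trans_ac_txq_enable(trans, ac_queue, fifo, wdg_timeout);
 */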
1081
1082static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
1083					      unsigned long txqs,
1084					      bool freeze)
1085{
1086	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1087		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1088		return;
1089	}
1090
1091	if (trans->ops->freeze_txq_timer)
1092		trans->ops->freeze_txq_timer(trans, txqs, freeze);
1093}
1094
1095static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
1096					    bool block)
1097{
1098	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1099		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1100		return;
1101	}
1102
1103	if (trans->ops->block_txq_ptrs)
1104		trans->ops->block_txq_ptrs(trans, block);
1105}
1106
1107static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
1108						u32 txqs)
1109{
1110	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
1111		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
1112		return -EIO;
1113	}
1114
1115	return trans->ops->wait_tx_queue_empty(trans, txqs);
1116}
1117
1118static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1119{
1120	trans->ops->write8(trans, ofs, val);
1121}
1122
1123static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1124{
1125	trans->ops->write32(trans, ofs, val);
1126}
1127
1128static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
1129{
1130	return trans->ops->read32(trans, ofs);
1131}
1132
1133static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
1134{
1135	return trans->ops->read_prph(trans, ofs);
1136}
1137
1138static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
1139					u32 val)
1140{
1141	return trans->ops->write_prph(trans, ofs, val);
1142}
1143
1144static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
1145				     void *buf, int dwords)
1146{
1147	return trans->ops->read_mem(trans, addr, buf, dwords);
1148}
1149
1150#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
1151	do {								      \
1152		if (__builtin_constant_p(bufsize))			      \
1153			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
1154		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
1155	} while (0)
1156
1157static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
1158{
1159	u32 value;
1160
1161	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
1162		return 0xa5a5a5a5;
1163
1164	return value;
1165}
1166
1167static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
1168				      const void *buf, int dwords)
1169{
1170	return trans->ops->write_mem(trans, addr, buf, dwords);
1171}
1172
1173static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
1174					u32 val)
1175{
1176	return iwl_trans_write_mem(trans, addr, &val, 1);
1177}
1178
1179static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
1180{
1181	if (trans->ops->set_pmi)
1182		trans->ops->set_pmi(trans, state);
1183}
1184
1185static inline void
1186iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
1187{
1188	trans->ops->set_bits_mask(trans, reg, mask, value);
1189}
1190
1191#define iwl_trans_grab_nic_access(trans, flags)	\
1192	__cond_lock(nic_access,				\
1193		    likely((trans)->ops->grab_nic_access(trans, flags)))
1194
1195static inline void __releases(nic_access)
1196iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
1197{
1198	trans->ops->release_nic_access(trans, flags);
1199	__release(nic_access);
1200}
1201
1202static inline void iwl_trans_fw_error(struct iwl_trans *trans)
1203{
1204	if (WARN_ON_ONCE(!trans->op_mode))
1205		return;
1206
1207	/* prevent double restarts due to the same erroneous FW */
1208	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
1209		iwl_op_mode_nic_error(trans->op_mode);
1210}
1211
1212/*****************************************************
1213 * transport helper functions
1214 *****************************************************/
1215struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
1216				  struct device *dev,
1217				  const struct iwl_cfg *cfg,
1218				  const struct iwl_trans_ops *ops,
1219				  size_t dev_cmd_headroom);
1220void iwl_trans_free(struct iwl_trans *trans);
1221
1222/*****************************************************
1223* driver (transport) register/unregister functions
1224******************************************************/
1225int __must_check iwl_pci_register_driver(void);
1226void iwl_pci_unregister_driver(void);
1227
1228#endif /* __iwl_trans_h__ */