   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/**************************************************************************/
   3/*                                                                        */
   4/*  IBM System i and System p Virtual NIC Device Driver                   */
   5/*  Copyright (C) 2014 IBM Corp.                                          */
   6/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   7/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   8/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   9/*                                                                        */
  10/*                                                                        */
  11/* This module contains the implementation of a virtual ethernet device   */
  12/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
   13/*  option of the RS/6000 Platform Architecture to interface with virtual  */
  14/* ethernet NICs that are presented to the partition by the hypervisor.   */
  15/*                                                                        */
  16/**************************************************************************/
  17
  18#define IBMVNIC_NAME		"ibmvnic"
  19#define IBMVNIC_DRIVER_VERSION	"1.0.1"
  20#define IBMVNIC_INVALID_MAP	-1
  21#define IBMVNIC_OPEN_FAILED	3
  22
  23/* basic structures plus 100 2k buffers */
  24#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305
  25
  26/* Initial module_parameters */
  27#define IBMVNIC_RX_WEIGHT		16
  28/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
  29#define IBMVNIC_BUFFS_PER_POOL	100
  30#define IBMVNIC_MAX_QUEUES	16
  31#define IBMVNIC_MAX_QUEUE_SZ   4096
  32#define IBMVNIC_MAX_IND_DESCS  16
  33#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)
  34
  35#define IBMVNIC_TSO_BUF_SZ	65536
  36#define IBMVNIC_TSO_BUFS	64
  37#define IBMVNIC_TSO_POOL_MASK	0x80000000
  38
   39/* A VNIC adapter has a set of Rx and Tx pools (aka queues). Each Rx/Tx pool
  40 * has a set of buffers. The size of each buffer is determined by the MTU.
  41 *
  42 * Each Rx/Tx pool is also associated with a DMA region that is shared
  43 * with the "hardware" (VIOS) and used to send/receive packets. The DMA
  44 * region is also referred to as a Long Term Buffer or LTB.
  45 *
  46 * The size of the DMA region required for an Rx/Tx pool depends on the
   47 * number and size (MTU) of the buffers in the pool. At the maximum of
   48 * 4096 jumbo-frame buffers (MTU=9000) we need about 9K * 4K = 36MB plus
   49 * some padding.
  50 *
  51 * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
   52 * kernel (about 16MB currently).  To support, say, 4K jumbo frames, we
  53 * use a set of LTBs (struct ltb_set) per pool.
  54 *
  55 * IBMVNIC_ONE_LTB_MAX  - max size of each LTB supported by kernel
  56 * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
  57 * (must be <= IBMVNIC_ONE_LTB_MAX)
  58 * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
  59 *
   60 * Each VNIC can have up to 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
   61 * have a fixed size of IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS = 4MB.
  62 *
   63 * The Rx and Tx pools can have up to 4096 buffers. The max size of these
   64 * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
   65 * So we set IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
  66 *
   67 * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
   68 * the allocation of the LTB can fail when the system is low on memory.
   69 * If it is too small, we would need several mappings for each of the
   70 * Rx/Tx/TSO pools, but there is a limit of 255 mappings per vnic in the
   71 * VNIC protocol.
  72 *
   73 * So we set IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
   74 * to 38MB, we need 5 LTBs per Rx and Tx pool and 1 LTB per TSO pool
   75 * (for its 4MB). Thus the 16 Rx and 16 Tx queues require 32 * 5 = 160
   76 * mappings plus 16 for the TSO pools, a total of 176 LTB mappings per VNIC.
  77 */
  78#define IBMVNIC_ONE_LTB_MAX	((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
  79#define IBMVNIC_ONE_LTB_SIZE	min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
  80#define IBMVNIC_LTB_SET_SIZE	(38 << 20)
  81
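/* Illustrative sketch (not part of the driver): the number of LTBs an
 * ltb_set needs for a pool follows directly from the arithmetic above.
 * example_num_ltbs() is a hypothetical helper, not a function in
 * ibmvnic.c:
 *
 *	static u32 example_num_ltbs(u32 num_buffs, u32 buff_size)
 *	{
 *		u64 total = (u64)num_buffs * buff_size;
 *
 *		return DIV_ROUND_UP(total, IBMVNIC_ONE_LTB_SIZE);
 *	}
 *
 * With num_buffs = 4096 and buff_size ~= 9588 this yields 5 LTBs of 8MB
 * each, matching the 38MB IBMVNIC_LTB_SET_SIZE discussed above.
 */
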
  82#define IBMVNIC_BUFFER_HLEN		500
  83#define IBMVNIC_RESET_DELAY 100
  84
  85struct ibmvnic_login_buffer {
  86	__be32 len;
  87	__be32 version;
  88#define INITIAL_VERSION_LB 1
  89	__be32 num_txcomp_subcrqs;
  90	__be32 off_txcomp_subcrqs;
  91	__be32 num_rxcomp_subcrqs;
  92	__be32 off_rxcomp_subcrqs;
  93	__be32 login_rsp_ioba;
  94	__be32 login_rsp_len;
  95	__be32 client_data_offset;
  96	__be32 client_data_len;
  97} __packed __aligned(8);
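
/* All fields above are big-endian as seen by the VIOS, so a client
 * converts from CPU byte order when building the buffer. Minimal sketch
 * (illustrative only; the driver's real login setup in ibmvnic.c also
 * lays out the variable-length arrays that follow this header, and
 * buf_sz/num_tx/num_rx here are hypothetical locals):
 *
 *	struct ibmvnic_login_buffer *lb = kzalloc(buf_sz, GFP_KERNEL);
 *
 *	if (!lb)
 *		return -ENOMEM;
 *	lb->len = cpu_to_be32(buf_sz);
 *	lb->version = cpu_to_be32(INITIAL_VERSION_LB);
 *	lb->num_txcomp_subcrqs = cpu_to_be32(num_tx);
 *	lb->num_rxcomp_subcrqs = cpu_to_be32(num_rx);
 */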
  98
  99struct ibmvnic_login_rsp_buffer {
 100	__be32 len;
 101	__be32 version;
 102#define INITIAL_VERSION_LRB 1
 103	__be32 num_txsubm_subcrqs;
 104	__be32 off_txsubm_subcrqs;
 105	__be32 num_rxadd_subcrqs;
 106	__be32 off_rxadd_subcrqs;
 107	__be32 off_rxadd_buff_size;
 108	__be32 num_supp_tx_desc;
 109	__be32 off_supp_tx_desc;
 110} __packed __aligned(8);
 111
 112struct ibmvnic_query_ip_offload_buffer {
 113	__be32 len;
 114	__be32 version;
 115#define INITIAL_VERSION_IOB 1
 116	u8 ipv4_chksum;
 117	u8 ipv6_chksum;
 118	u8 tcp_ipv4_chksum;
 119	u8 tcp_ipv6_chksum;
 120	u8 udp_ipv4_chksum;
 121	u8 udp_ipv6_chksum;
 122	u8 large_tx_ipv4;
 123	u8 large_tx_ipv6;
 124	u8 large_rx_ipv4;
 125	u8 large_rx_ipv6;
 126	u8 reserved1[14];
 127	__be16 max_ipv4_header_size;
 128	__be16 max_ipv6_header_size;
 129	__be16 max_tcp_header_size;
 130	__be16 max_udp_header_size;
 131	__be32 max_large_tx_size;
 132	__be32 max_large_rx_size;
 133	u8 reserved2[16];
 134	u8 ipv6_extension_header;
 135#define IPV6_EH_NOT_SUPPORTED	0x00
 136#define IPV6_EH_SUPPORTED_LIM	0x01
 137#define IPV6_EH_SUPPORTED	0xFF
 138	u8 tcp_pseudosum_req;
 139#define TCP_PS_NOT_REQUIRED	0x00
 140#define TCP_PS_REQUIRED		0x01
 141	u8 reserved3[30];
 142	__be16 num_ipv6_ext_headers;
 143	__be32 off_ipv6_ext_headers;
 144	u8 reserved4[154];
 145} __packed __aligned(8);
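
/* A sketch of turning this query response into netdev feature bits
 * (illustrative only; the driver's real handling in ibmvnic.c covers
 * more cases and also programs the control buffer below). buf is a
 * struct ibmvnic_query_ip_offload_buffer the VIOS has filled in by DMA:
 *
 *	netdev_features_t feats = 0;
 *
 *	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
 *		feats |= NETIF_F_IP_CSUM;
 *	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
 *		feats |= NETIF_F_IPV6_CSUM;
 *	if (buf->large_tx_ipv4)
 *		feats |= NETIF_F_TSO;
 *	if (buf->large_tx_ipv6)
 *		feats |= NETIF_F_TSO6;
 */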
 146
 147struct ibmvnic_control_ip_offload_buffer {
 148	__be32 len;
 149	__be32 version;
 150#define INITIAL_VERSION_IOB 1
 151	u8 ipv4_chksum;
 152	u8 ipv6_chksum;
 153	u8 tcp_ipv4_chksum;
 154	u8 tcp_ipv6_chksum;
 155	u8 udp_ipv4_chksum;
 156	u8 udp_ipv6_chksum;
 157	u8 large_tx_ipv4;
 158	u8 large_tx_ipv6;
 159	u8 bad_packet_rx;
 160	u8 large_rx_ipv4;
 161	u8 large_rx_ipv6;
 162	u8 reserved4[111];
 163} __packed __aligned(8);
 164
 165struct ibmvnic_fw_component {
 166	u8 name[48];
 167	__be32 trace_buff_size;
 168	u8 correlator;
 169	u8 trace_level;
 170	u8 parent_correlator;
 171	u8 error_check_level;
 172	u8 trace_on;
 173	u8 reserved[7];
 174	u8 description[192];
 175} __packed __aligned(8);
 176
 177struct ibmvnic_fw_trace_entry {
 178	__be32 trace_id;
 179	u8 num_valid_data;
 180	u8 reserved[3];
 181	__be64 pmc_registers;
 182	__be64 timebase;
 183	__be64 trace_data[5];
 184} __packed __aligned(8);
 185
 186struct ibmvnic_statistics {
 187	__be32 version;
 188	__be32 promiscuous;
 189	__be64 rx_packets;
 190	__be64 rx_bytes;
 191	__be64 tx_packets;
 192	__be64 tx_bytes;
 193	__be64 ucast_tx_packets;
 194	__be64 ucast_rx_packets;
 195	__be64 mcast_tx_packets;
 196	__be64 mcast_rx_packets;
 197	__be64 bcast_tx_packets;
 198	__be64 bcast_rx_packets;
 199	__be64 align_errors;
 200	__be64 fcs_errors;
 201	__be64 single_collision_frames;
 202	__be64 multi_collision_frames;
 203	__be64 sqe_test_errors;
 204	__be64 deferred_tx;
 205	__be64 late_collisions;
 206	__be64 excess_collisions;
 207	__be64 internal_mac_tx_errors;
 208	__be64 carrier_sense;
 209	__be64 too_long_frames;
 210	__be64 internal_mac_rx_errors;
 211	u8 reserved[72];
 212} __packed __aligned(8);
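
/* These counters are returned by firmware in big-endian form. Sketch of
 * folding a few of them into struct rtnl_link_stats64 (illustrative; the
 * field selection is an example, not the driver's exact mapping):
 *
 *	static void example_fill_stats(struct rtnl_link_stats64 *s,
 *				       const struct ibmvnic_statistics *fw)
 *	{
 *		s->rx_packets = be64_to_cpu(fw->rx_packets);
 *		s->rx_bytes = be64_to_cpu(fw->rx_bytes);
 *		s->tx_packets = be64_to_cpu(fw->tx_packets);
 *		s->tx_bytes = be64_to_cpu(fw->tx_bytes);
 *		s->rx_crc_errors = be64_to_cpu(fw->fcs_errors);
 *	}
 */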
 213
  214#define NUM_TX_STATS 4	/* number of u64 counters in ibmvnic_tx_queue_stats */
 215struct ibmvnic_tx_queue_stats {
 216	u64 batched_packets;
 217	u64 direct_packets;
 218	u64 bytes;
 219	u64 dropped_packets;
 220};
 221
 222#define NUM_RX_STATS 3
 223struct ibmvnic_rx_queue_stats {
 224	u64 packets;
 225	u64 bytes;
 226	u64 interrupts;
 227};
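
/* NUM_TX_STATS and NUM_RX_STATS size the per-queue ethtool statistics
 * arrays, so each must equal the number of u64 counters in the struct it
 * describes. A more defensive definition (an alternative sketch, not the
 * current code) derives the count from the struct itself:
 *
 *	#define NUM_TX_STATS \
 *		(sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64))
 *	#define NUM_RX_STATS \
 *		(sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64))
 */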
 228
 229struct ibmvnic_acl_buffer {
 230	__be32 len;
 231	__be32 version;
 232#define INITIAL_VERSION_IOB 1
 233	u8 mac_acls_restrict;
 234	u8 vlan_acls_restrict;
 235	u8 reserved1[22];
 236	__be32 num_mac_addrs;
 237	__be32 offset_mac_addrs;
 238	__be32 num_vlan_ids;
 239	__be32 offset_vlan_ids;
 240	u8 reserved2[80];
 241} __packed __aligned(8);
 242
 243/* descriptors have been changed, how should this be defined?  1? 4? */
 244
 245#define IBMVNIC_TX_DESC_VERSIONS 3
 246
 247/* is this still needed? */
 248struct ibmvnic_tx_comp_desc {
 249	u8 first;
 250	u8 num_comps;
 251	__be16 rcs[5];
 252	__be32 correlators[5];
 253} __packed __aligned(8);
 254
  255/* Some flags that were included in the v0 descriptor, which is gone.
  256 * They are only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM,
  257 * and only in an offload_flags variable that doesn't seem to be used
  258 * anywhere, so they can probably be removed.
  259 */
 260
 261#define IBMVNIC_TCP_CHKSUM		0x20
 262#define IBMVNIC_UDP_CHKSUM		0x08
 263
 264struct ibmvnic_tx_desc {
 265	u8 first;
 266	u8 type;
 267
 268#define IBMVNIC_TX_DESC 0x10
 269	u8 n_crq_elem;
 270	u8 n_sge;
 271	u8 flags1;
 272#define IBMVNIC_TX_COMP_NEEDED		0x80
 273#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
 274#define IBMVNIC_TX_LSO			0x20
 275#define IBMVNIC_TX_PROT_TCP		0x10
 276#define IBMVNIC_TX_PROT_UDP		0x08
 277#define IBMVNIC_TX_PROT_IPV4		0x04
 278#define IBMVNIC_TX_PROT_IPV6		0x02
 279#define IBMVNIC_TX_VLAN_PRESENT		0x01
 280	u8 flags2;
 281#define IBMVNIC_TX_VLAN_INSERT		0x80
 282	__be16 mss;
 283	u8 reserved[4];
 284	__be32 correlator;
 285	__be16 vlan_id;
 286	__be16 dma_reg;
 287	__be32 sge_len;
 288	__be64 ioba;
 289} __packed __aligned(8);
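
/* Illustrative sketch of filling a v1 TX descriptor for a TCP/IPv4 frame
 * with checksum offload (not the driver's full transmit path, which also
 * handles LSO, VLAN and scatter-gather; index, map_id, skb and dma_addr
 * are hypothetical locals). union sub_crq and IBMVNIC_CRQ_CMD are defined
 * later in this file:
 *
 *	union sub_crq tx_crq = {};
 *
 *	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
 *	tx_crq.v1.type = IBMVNIC_TX_DESC;
 *	tx_crq.v1.n_crq_elem = 1;
 *	tx_crq.v1.n_sge = 1;
 *	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED |
 *			   IBMVNIC_TX_CHKSUM_OFFLOAD |
 *			   IBMVNIC_TX_PROT_TCP | IBMVNIC_TX_PROT_IPV4;
 *	tx_crq.v1.correlator = cpu_to_be32(index);
 *	tx_crq.v1.dma_reg = cpu_to_be16(map_id);
 *	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
 *	tx_crq.v1.ioba = cpu_to_be64(dma_addr);
 */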
 290
 291struct ibmvnic_hdr_desc {
 292	u8 first;
 293	u8 type;
 294#define IBMVNIC_HDR_DESC		0x11
 295	u8 len;
 296	u8 l2_len;
 297	__be16 l3_len;
 298	u8 l4_len;
 299	u8 flag;
 300	u8 data[24];
 301} __packed __aligned(8);
 302
 303struct ibmvnic_hdr_ext_desc {
 304	u8 first;
 305	u8 type;
 306#define IBMVNIC_HDR_EXT_DESC		0x12
 307	u8 len;
 308	u8 data[29];
 309} __packed __aligned(8);
 310
 311struct ibmvnic_sge_desc {
 312	u8 first;
 313	u8 type;
 314#define IBMVNIC_SGE_DESC		0x30
 315	__be16 sge1_dma_reg;
 316	__be32 sge1_len;
 317	__be64 sge1_ioba;
 318	__be16 reserved;
 319	__be16 sge2_dma_reg;
 320	__be32 sge2_len;
 321	__be64 sge2_ioba;
 322} __packed __aligned(8);
 323
 324struct ibmvnic_rx_comp_desc {
 325	u8 first;
 326	u8 flags;
 327#define IBMVNIC_IP_CHKSUM_GOOD		0x80
 328#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
 329#define IBMVNIC_END_FRAME			0x20
 330#define IBMVNIC_EXACT_MC			0x10
 331#define IBMVNIC_VLAN_STRIPPED			0x08
 332	__be16 off_frame_data;
 333	__be32 len;
 334	__be64 correlator;
 335	__be16 vlan_tci;
 336	__be16 rc;
 337	u8 reserved[12];
 338} __packed __aligned(8);
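
/* On receive, the flags above tell the client whether the VIOS already
 * verified the checksums. Sketch (illustrative; see the RX poll loop in
 * ibmvnic.c for the real handling):
 *
 *	u8 flags = rx_comp->flags;
 *
 *	if ((flags & IBMVNIC_IP_CHKSUM_GOOD) &&
 *	    (flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *
 *	offset = be16_to_cpu(rx_comp->off_frame_data);
 *	length = be32_to_cpu(rx_comp->len);
 */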
 339
 340struct ibmvnic_generic_scrq {
 341	u8 first;
 342	u8 reserved[31];
 343} __packed __aligned(8);
 344
 345struct ibmvnic_rx_buff_add_desc {
 346	u8 first;
 347	u8 reserved[7];
 348	__be64 correlator;
 349	__be32 ioba;
 350	u8 map_id;
 351	__be32 len:24;
 352	u8 reserved2[8];
 353} __packed __aligned(8);
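
/* The 24-bit len bitfield needs care: on a little-endian host the value
 * must be shifted up by one byte before the cpu_to_be32() conversion so
 * its low byte is not truncated. Sketch of posting a buffer (illustrative;
 * the real code is the RX replenish path in ibmvnic.c; pool, index, ltb
 * and dma_addr are hypothetical locals):
 *
 *	union sub_crq sub_crq = {};
 *	int shift = 0;
 *
 *	sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
 *	sub_crq.rx_add.correlator = cpu_to_be64((u64)&pool->rx_buff[index]);
 *	sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
 *	sub_crq.rx_add.map_id = ltb->map_id;
 *#ifdef __LITTLE_ENDIAN__
 *	shift = 8;
 *#endif
 *	sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
 */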
 354
 355struct ibmvnic_rc {
 356	u8 code; /* one of enum ibmvnic_rc_codes */
 357	u8 detailed_data[3];
 358} __packed __aligned(4);
 359
 360struct ibmvnic_generic_crq {
 361	u8 first;
 362	u8 cmd;
 363	u8 params[10];
 364	struct ibmvnic_rc rc;
 365} __packed __aligned(8);
 366
 367struct ibmvnic_version_exchange {
 368	u8 first;
 369	u8 cmd;
 370	__be16 version;
 371#define IBMVNIC_INITIAL_VERSION 1
 372	u8 reserved[8];
 373	struct ibmvnic_rc rc;
 374} __packed __aligned(8);
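
/* The version exchange is the first command a client sends once the CRQ
 * is up. Minimal sketch (illustrative; ibmvnic_send_crq() is the driver's
 * CRQ submission helper in ibmvnic.c):
 *
 *	union ibmvnic_crq crq = {};
 *
 *	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
 *	crq.version_exchange.cmd = VERSION_EXCHANGE;
 *	crq.version_exchange.version = cpu_to_be16(IBMVNIC_INITIAL_VERSION);
 *	ibmvnic_send_crq(adapter, &crq);
 */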
 375
 376struct ibmvnic_capability {
 377	u8 first;
 378	u8 cmd;
 379	__be16 capability; /* one of ibmvnic_capabilities */
 380	__be64 number;
 381	struct ibmvnic_rc rc;
 382} __packed __aligned(8);
 383
 384struct ibmvnic_login {
 385	u8 first;
 386	u8 cmd;
 387	u8 reserved[6];
 388	__be32 ioba;
 389	__be32 len;
 390} __packed __aligned(8);
 391
 392struct ibmvnic_phys_parms {
 393	u8 first;
 394	u8 cmd;
 395	u8 flags1;
 396#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
 397#define IBMVNIC_INTERNAL_LOOPBACK	0x40
 398#define IBMVNIC_PROMISC		0x20
 399#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
 400#define IBMVNIC_AUTONEG_DUPLEX	0x08
 401#define IBMVNIC_FULL_DUPLEX	0x04
 402#define IBMVNIC_HALF_DUPLEX	0x02
 403#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
 404	u8 flags2;
 405#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
 406	__be32 speed;
 407#define IBMVNIC_AUTONEG		0x80000000
 408#define IBMVNIC_10MBPS		0x40000000
 409#define IBMVNIC_100MBPS		0x20000000
 410#define IBMVNIC_1GBPS		0x10000000
 411#define IBMVNIC_10GBPS		0x08000000
 412#define IBMVNIC_40GBPS		0x04000000
 413#define IBMVNIC_100GBPS		0x02000000
 414#define IBMVNIC_25GBPS		0x01000000
 415#define IBMVNIC_50GBPS		0x00800000
 416#define IBMVNIC_200GBPS		0x00400000
 417	__be32 mtu;
 418	struct ibmvnic_rc rc;
 419} __packed __aligned(8);
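
/* The speed field is a bitmask, not a value in Mbps. Sketch of decoding
 * it for ethtool (illustrative and abbreviated; the driver's handler
 * covers every speed bit defined above):
 *
 *	switch (be32_to_cpu(rsp->speed)) {
 *	case IBMVNIC_1GBPS:
 *		speed = SPEED_1000;
 *		break;
 *	case IBMVNIC_10GBPS:
 *		speed = SPEED_10000;
 *		break;
 *	case IBMVNIC_100GBPS:
 *		speed = SPEED_100000;
 *		break;
 *	default:
 *		speed = SPEED_UNKNOWN;
 *		break;
 *	}
 *
 * rsp is a struct ibmvnic_phys_parms response and speed a u32 local.
 */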
 420
 421struct ibmvnic_logical_link_state {
 422	u8 first;
 423	u8 cmd;
 424	u8 link_state;
 425#define IBMVNIC_LOGICAL_LNK_DN 0x00
 426#define IBMVNIC_LOGICAL_LNK_UP 0x01
 427#define IBMVNIC_LOGICAL_LNK_QUERY 0xff
 428	u8 reserved[9];
 429	struct ibmvnic_rc rc;
 430} __packed __aligned(8);
 431
 432struct ibmvnic_query_ip_offload {
 433	u8 first;
 434	u8 cmd;
 435	u8 reserved[2];
 436	__be32 len;
 437	__be32 ioba;
 438	struct ibmvnic_rc rc;
 439} __packed __aligned(8);
 440
 441struct ibmvnic_control_ip_offload {
 442	u8 first;
 443	u8 cmd;
 444	u8 reserved[2];
 445	__be32 ioba;
 446	__be32 len;
 447	struct ibmvnic_rc rc;
 448} __packed __aligned(8);
 449
 450struct ibmvnic_request_statistics {
 451	u8 first;
 452	u8 cmd;
 453	u8 flags;
 454#define IBMVNIC_PHYSICAL_PORT	0x80
 455	u8 reserved1;
 456	__be32 ioba;
 457	__be32 len;
 458	u8 reserved[4];
 459} __packed __aligned(8);
 460
 461struct ibmvnic_error_indication {
 462	u8 first;
 463	u8 cmd;
 464	u8 flags;
 465#define IBMVNIC_FATAL_ERROR	0x80
 466	u8 reserved1;
 467	__be32 error_id;
 468	__be32 detail_error_sz;
 469	__be16 error_cause;
 470	u8 reserved2[2];
 471} __packed __aligned(8);
 472
 473struct ibmvnic_link_state_indication {
 474	u8 first;
 475	u8 cmd;
 476	u8 reserved1[2];
 477	u8 phys_link_state;
 478	u8 logical_link_state;
 479	u8 reserved2[10];
 480} __packed __aligned(8);
 481
 482struct ibmvnic_change_mac_addr {
 483	u8 first;
 484	u8 cmd;
 485	u8 mac_addr[6];
 486	u8 reserved[4];
 487	struct ibmvnic_rc rc;
 488} __packed __aligned(8);
 489
 490struct ibmvnic_multicast_ctrl {
 491	u8 first;
 492	u8 cmd;
 493	u8 mac_addr[6];
 494	u8 flags;
 495#define IBMVNIC_ENABLE_MC		0x80
 496#define IBMVNIC_DISABLE_MC		0x40
 497#define IBMVNIC_ENABLE_ALL		0x20
 498#define IBMVNIC_DISABLE_ALL	0x10
 499	u8 reserved1;
 500	__be16 reserved2; /* was num_enabled_mc_addr; */
 501	struct ibmvnic_rc rc;
 502} __packed __aligned(8);
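
/* Sketch of asking the VIOS to accept all multicast traffic
 * (illustrative only):
 *
 *	union ibmvnic_crq crq = {};
 *
 *	crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
 *	crq.multicast_ctrl.cmd = MULTICAST_CTRL;
 *	crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
 *	ibmvnic_send_crq(adapter, &crq);
 */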
 503
 504struct ibmvnic_get_vpd_size {
 505	u8 first;
 506	u8 cmd;
 507	u8 reserved[14];
 508} __packed __aligned(8);
 509
 510struct ibmvnic_get_vpd_size_rsp {
 511	u8 first;
 512	u8 cmd;
 513	u8 reserved[2];
 514	__be64 len;
 515	struct ibmvnic_rc rc;
 516} __packed __aligned(8);
 517
 518struct ibmvnic_get_vpd {
 519	u8 first;
 520	u8 cmd;
 521	u8 reserved1[2];
 522	__be32 ioba;
 523	__be32 len;
 524	u8 reserved[4];
 525} __packed __aligned(8);
 526
 527struct ibmvnic_get_vpd_rsp {
 528	u8 first;
 529	u8 cmd;
 530	u8 reserved[10];
 531	struct ibmvnic_rc rc;
 532} __packed __aligned(8);
 533
 534struct ibmvnic_acl_change_indication {
 535	u8 first;
 536	u8 cmd;
 537	__be16 change_type;
 538#define IBMVNIC_MAC_ACL 0
 539#define IBMVNIC_VLAN_ACL 1
 540	u8 reserved[12];
 541} __packed __aligned(8);
 542
 543struct ibmvnic_acl_query {
 544	u8 first;
 545	u8 cmd;
 546	u8 reserved1[2];
 547	__be32 ioba;
 548	__be32 len;
 549	u8 reserved2[4];
 550} __packed __aligned(8);
 551
 552struct ibmvnic_tune {
 553	u8 first;
 554	u8 cmd;
 555	u8 reserved1[2];
 556	__be32 ioba;
 557	__be32 len;
 558	u8 reserved2[4];
 559} __packed __aligned(8);
 560
 561struct ibmvnic_request_map {
 562	u8 first;
 563	u8 cmd;
 564	u8 reserved1;
 565	u8 map_id;
 566	__be32 ioba;
 567	__be32 len;
 568	u8 reserved2[4];
 569} __packed __aligned(8);
 570
 571struct ibmvnic_request_map_rsp {
 572	u8 first;
 573	u8 cmd;
 574	u8 reserved1;
 575	u8 map_id;
 576	u8 reserved2[8];
 577	struct ibmvnic_rc rc;
 578} __packed __aligned(8);
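
/* Sketch of registering a long term buffer with the VIOS (illustrative;
 * the driver sends this when it DMA-maps an LTB, and the VIOS answers
 * with a REQUEST_MAP_RSP carrying the same map_id plus an rc). ltb is a
 * struct ibmvnic_long_term_buff, defined later in this file:
 *
 *	union ibmvnic_crq crq = {};
 *
 *	crq.request_map.first = IBMVNIC_CRQ_CMD;
 *	crq.request_map.cmd = REQUEST_MAP;
 *	crq.request_map.map_id = ltb->map_id;
 *	crq.request_map.ioba = cpu_to_be32(ltb->addr);
 *	crq.request_map.len = cpu_to_be32(ltb->size);
 *	ibmvnic_send_crq(adapter, &crq);
 */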
 579
 580struct ibmvnic_request_unmap {
 581	u8 first;
 582	u8 cmd;
 583	u8 reserved1;
 584	u8 map_id;
 585	u8 reserved2[12];
 586} __packed __aligned(8);
 587
 588struct ibmvnic_request_unmap_rsp {
 589	u8 first;
 590	u8 cmd;
 591	u8 reserved1;
 592	u8 map_id;
 593	u8 reserved2[8];
 594	struct ibmvnic_rc rc;
 595} __packed __aligned(8);
 596
 597struct ibmvnic_query_map {
 598	u8 first;
 599	u8 cmd;
 600	u8 reserved[14];
 601} __packed __aligned(8);
 602
 603struct ibmvnic_query_map_rsp {
 604	u8 first;
 605	u8 cmd;
 606	u8 reserved;
 607	u8 page_size;
 608	__be32 tot_pages;
 609	__be32 free_pages;
 610	struct ibmvnic_rc rc;
 611} __packed __aligned(8);
 612
 613union ibmvnic_crq {
 614	struct ibmvnic_generic_crq generic;
 615	struct ibmvnic_version_exchange version_exchange;
 616	struct ibmvnic_version_exchange version_exchange_rsp;
 617	struct ibmvnic_capability query_capability;
 618	struct ibmvnic_capability query_capability_rsp;
 619	struct ibmvnic_capability request_capability;
 620	struct ibmvnic_capability request_capability_rsp;
 621	struct ibmvnic_login login;
 622	struct ibmvnic_generic_crq login_rsp;
 623	struct ibmvnic_phys_parms query_phys_parms;
 624	struct ibmvnic_phys_parms query_phys_parms_rsp;
 625	struct ibmvnic_phys_parms query_phys_capabilities;
 626	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
 627	struct ibmvnic_phys_parms set_phys_parms;
 628	struct ibmvnic_phys_parms set_phys_parms_rsp;
 629	struct ibmvnic_logical_link_state logical_link_state;
 630	struct ibmvnic_logical_link_state logical_link_state_rsp;
 631	struct ibmvnic_query_ip_offload query_ip_offload;
 632	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
 633	struct ibmvnic_control_ip_offload control_ip_offload;
 634	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
 635	struct ibmvnic_request_statistics request_statistics;
 636	struct ibmvnic_generic_crq request_statistics_rsp;
 637	struct ibmvnic_error_indication error_indication;
 638	struct ibmvnic_link_state_indication link_state_indication;
 639	struct ibmvnic_change_mac_addr change_mac_addr;
 640	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
 641	struct ibmvnic_multicast_ctrl multicast_ctrl;
 642	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
 643	struct ibmvnic_get_vpd_size get_vpd_size;
 644	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
 645	struct ibmvnic_get_vpd get_vpd;
 646	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
 647	struct ibmvnic_acl_change_indication acl_change_indication;
 648	struct ibmvnic_acl_query acl_query;
 649	struct ibmvnic_generic_crq acl_query_rsp;
 650	struct ibmvnic_tune tune;
 651	struct ibmvnic_generic_crq tune_rsp;
 652	struct ibmvnic_request_map request_map;
 653	struct ibmvnic_request_map_rsp request_map_rsp;
 654	struct ibmvnic_request_unmap request_unmap;
 655	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
 656	struct ibmvnic_query_map query_map;
 657	struct ibmvnic_query_map_rsp query_map_rsp;
 658};
 659
 660enum ibmvnic_rc_codes {
 661	SUCCESS = 0,
 662	PARTIALSUCCESS = 1,
 663	PERMISSION = 2,
 664	NOMEMORY = 3,
 665	PARAMETER = 4,
 666	UNKNOWNCOMMAND = 5,
 667	ABORTED = 6,
 668	INVALIDSTATE = 7,
 669	INVALIDIOBA = 8,
 670	INVALIDLENGTH = 9,
 671	UNSUPPORTEDOPTION = 10,
 672};
 673
 674enum ibmvnic_capabilities {
 675	MIN_TX_QUEUES = 1,
 676	MIN_RX_QUEUES = 2,
 677	MIN_RX_ADD_QUEUES = 3,
 678	MAX_TX_QUEUES = 4,
 679	MAX_RX_QUEUES = 5,
 680	MAX_RX_ADD_QUEUES = 6,
 681	REQ_TX_QUEUES = 7,
 682	REQ_RX_QUEUES = 8,
 683	REQ_RX_ADD_QUEUES = 9,
 684	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
 685	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
 686	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
 687	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
 688	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
 689	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
 690	TCP_IP_OFFLOAD = 16,
 691	PROMISC_REQUESTED = 17,
 692	PROMISC_SUPPORTED = 18,
 693	MIN_MTU = 19,
 694	MAX_MTU = 20,
 695	REQ_MTU = 21,
 696	MAX_MULTICAST_FILTERS = 22,
 697	VLAN_HEADER_INSERTION = 23,
 698	RX_VLAN_HEADER_INSERTION = 24,
 699	MAX_TX_SG_ENTRIES = 25,
 700	RX_SG_SUPPORTED = 26,
 701	RX_SG_REQUESTED = 27,
 702	OPT_TX_COMP_SUB_QUEUES = 28,
 703	OPT_RX_COMP_QUEUES = 29,
 704	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
 705	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
 706	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
 707	TX_RX_DESC_REQ = 33,
 708};
 709
 710enum ibmvnic_error_cause {
 711	ADAPTER_PROBLEM = 0,
 712	BUS_PROBLEM = 1,
 713	FW_PROBLEM = 2,
 714	DD_PROBLEM = 3,
 715	EEH_RECOVERY = 4,
 716	FW_UPDATED = 5,
 717	LOW_MEMORY = 6,
 718};
 719
 720enum ibmvnic_commands {
 721	VERSION_EXCHANGE = 0x01,
 722	VERSION_EXCHANGE_RSP = 0x81,
 723	QUERY_CAPABILITY = 0x02,
 724	QUERY_CAPABILITY_RSP = 0x82,
 725	REQUEST_CAPABILITY = 0x03,
 726	REQUEST_CAPABILITY_RSP = 0x83,
 727	LOGIN = 0x04,
 728	LOGIN_RSP = 0x84,
 729	QUERY_PHYS_PARMS = 0x05,
 730	QUERY_PHYS_PARMS_RSP = 0x85,
 731	QUERY_PHYS_CAPABILITIES = 0x06,
 732	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
 733	SET_PHYS_PARMS = 0x07,
 734	SET_PHYS_PARMS_RSP = 0x87,
 735	ERROR_INDICATION = 0x08,
 736	LOGICAL_LINK_STATE = 0x0C,
 737	LOGICAL_LINK_STATE_RSP = 0x8C,
 738	REQUEST_STATISTICS = 0x0D,
 739	REQUEST_STATISTICS_RSP = 0x8D,
 740	COLLECT_FW_TRACE = 0x11,
 741	COLLECT_FW_TRACE_RSP = 0x91,
 742	LINK_STATE_INDICATION = 0x12,
 743	CHANGE_MAC_ADDR = 0x13,
 744	CHANGE_MAC_ADDR_RSP = 0x93,
 745	MULTICAST_CTRL = 0x14,
 746	MULTICAST_CTRL_RSP = 0x94,
 747	GET_VPD_SIZE = 0x15,
 748	GET_VPD_SIZE_RSP = 0x95,
 749	GET_VPD = 0x16,
 750	GET_VPD_RSP = 0x96,
 751	TUNE = 0x17,
 752	TUNE_RSP = 0x97,
 753	QUERY_IP_OFFLOAD = 0x18,
 754	QUERY_IP_OFFLOAD_RSP = 0x98,
 755	CONTROL_IP_OFFLOAD = 0x19,
 756	CONTROL_IP_OFFLOAD_RSP = 0x99,
 757	ACL_CHANGE_INDICATION = 0x1A,
 758	ACL_QUERY = 0x1B,
 759	ACL_QUERY_RSP = 0x9B,
 760	QUERY_MAP = 0x1D,
 761	QUERY_MAP_RSP = 0x9D,
 762	REQUEST_MAP = 0x1E,
 763	REQUEST_MAP_RSP = 0x9E,
 764	REQUEST_UNMAP = 0x1F,
 765	REQUEST_UNMAP_RSP = 0x9F,
 766	VLAN_CTRL = 0x20,
 767	VLAN_CTRL_RSP = 0xA0,
 768};
 769
 770enum ibmvnic_crq_type {
 771	IBMVNIC_CRQ_CMD			= 0x80,
 772	IBMVNIC_CRQ_CMD_RSP		= 0x80,
 773	IBMVNIC_CRQ_INIT_CMD		= 0xC0,
 774	IBMVNIC_CRQ_INIT_RSP		= 0xC0,
 775	IBMVNIC_CRQ_XPORT_EVENT		= 0xFF,
 776};
 777
 778enum ibmvfc_crq_format {
 779	IBMVNIC_CRQ_INIT                 = 0x01,
 780	IBMVNIC_CRQ_INIT_COMPLETE        = 0x02,
 781	IBMVNIC_PARTITION_MIGRATED       = 0x06,
 782	IBMVNIC_DEVICE_FAILOVER          = 0x08,
 783};
 784
 785struct ibmvnic_crq_queue {
 786	union ibmvnic_crq *msgs;
 787	int size, cur;
 788	dma_addr_t msg_token;
 789	/* Used for serialization of msgs, cur */
 790	spinlock_t lock;
 791	bool active;
 792	char name[32];
 793};
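
/* The CRQ is a ring shared with firmware; a slot is valid while the top
 * bit of generic.first is set. Sketch of pulling the next entry under the
 * queue lock (illustrative; the driver does this from its CRQ tasklet):
 *
 *	union ibmvnic_crq *crq;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&queue->lock, flags);
 *	crq = &queue->msgs[queue->cur];
 *	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
 *		if (++queue->cur == queue->size)
 *			queue->cur = 0;
 *	} else {
 *		crq = NULL;
 *	}
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */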
 794
 795union sub_crq {
 796	struct ibmvnic_generic_scrq generic;
 797	struct ibmvnic_tx_comp_desc tx_comp;
 798	struct ibmvnic_tx_desc v1;
 799	struct ibmvnic_hdr_desc hdr;
 800	struct ibmvnic_hdr_ext_desc hdr_ext;
 801	struct ibmvnic_sge_desc sge;
 802	struct ibmvnic_rx_comp_desc rx_comp;
 803	struct ibmvnic_rx_buff_add_desc rx_add;
 804};
 805
 806struct ibmvnic_ind_xmit_queue {
 807	union sub_crq *indir_arr;
 808	dma_addr_t indir_dma;
 809	int index;
 810};
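
/* TX descriptors are staged in indir_arr and then flushed to the VIOS in
 * a single H_SEND_SUB_CRQ_INDIRECT hypervisor call that takes indir_dma,
 * the DMA address of the array. Sketch of the staging step (illustrative;
 * example_flush() is a hypothetical stand-in for the driver's flush and
 * error handling in ibmvnic.c):
 *
 *	ind_bufp->indir_arr[ind_bufp->index++] = tx_crq;
 *	if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS)
 *		example_flush(adapter, tx_scrq);
 */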
 811
 812struct ibmvnic_sub_crq_queue {
 813	union sub_crq *msgs;
 814	int size, cur;
 815	dma_addr_t msg_token;
 816	unsigned long crq_num;
 817	unsigned long hw_irq;
 818	unsigned int irq;
 819	unsigned int pool_index;
 820	int scrq_num;
 821	/* Used for serialization of msgs, cur */
 822	spinlock_t lock;
 823	struct sk_buff *rx_skb_top;
 824	struct ibmvnic_adapter *adapter;
 825	struct ibmvnic_ind_xmit_queue ind_buf;
 826	atomic_t used;
 827	char name[32];
 828	u64 handle;
 829	cpumask_var_t affinity_mask;
 830} ____cacheline_aligned;
 831
 832struct ibmvnic_long_term_buff {
 833	unsigned char *buff;
 834	dma_addr_t addr;
 835	u64 size;
 836	u8 map_id;
 837};
 838
 839struct ibmvnic_ltb_set {
 840	int num_ltbs;
 841	struct ibmvnic_long_term_buff *ltbs;
 842};
 843
 844struct ibmvnic_tx_buff {
 845	struct sk_buff *skb;
 846	int index;
 847	int pool_index;
 848	int num_entries;
 849};
 850
 851struct ibmvnic_tx_pool {
 852	struct ibmvnic_tx_buff *tx_buff;
 853	int *free_map;
 854	int consumer_index;
 855	int producer_index;
 856	struct ibmvnic_ltb_set ltb_set;
 857	int num_buffers;
 858	int buf_size;
 859} ____cacheline_aligned;
 860
 861struct ibmvnic_rx_buff {
 862	struct sk_buff *skb;
 863	dma_addr_t dma;
 864	unsigned char *data;
 865	int size;
 866	int pool_index;
 867};
 868
 869struct ibmvnic_rx_pool {
 870	struct ibmvnic_rx_buff *rx_buff;
 871	int size;			/* # of buffers in the pool */
 872	int index;
 873	int buff_size;
 874	atomic_t available;
 875	int *free_map;
 876	int next_free;
 877	int next_alloc;
 878	int active;
 879	struct ibmvnic_ltb_set ltb_set;
 880} ____cacheline_aligned;
 881
 882struct ibmvnic_vpd {
 883	unsigned char *buff;
 884	dma_addr_t dma_addr;
 885	u64 len;
 886};
 887
 888enum vnic_state {VNIC_PROBING = 1,
 889		 VNIC_PROBED,
 890		 VNIC_OPENING,
 891		 VNIC_OPEN,
 892		 VNIC_CLOSING,
 893		 VNIC_CLOSED,
 894		 VNIC_REMOVING,
 895		 VNIC_REMOVED,
 896		 VNIC_DOWN};
 897
 898enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
 899			   VNIC_RESET_MOBILITY,
 900			   VNIC_RESET_FATAL,
 901			   VNIC_RESET_NON_FATAL,
 902			   VNIC_RESET_TIMEOUT,
 903			   VNIC_RESET_CHANGE_PARAM,
 904			   VNIC_RESET_PASSIVE_INIT};
 905
 906struct ibmvnic_rwi {
 907	enum ibmvnic_reset_reason reset_reason;
 908	struct list_head list;
 909};
 910
 911struct ibmvnic_tunables {
 912	u64 rx_queues;
 913	u64 tx_queues;
 914	u64 rx_entries;
 915	u64 tx_entries;
 916	u64 mtu;
 917};
 918
 919struct ibmvnic_adapter {
 920	struct vio_dev *vdev;
 921	struct net_device *netdev;
 922	struct ibmvnic_crq_queue crq;
 923	u8 mac_addr[ETH_ALEN];
 924	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
 925	dma_addr_t ip_offload_tok;
 926	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
 927	dma_addr_t ip_offload_ctrl_tok;
 928	u32 msg_enable;
 929
 930	/* Vital Product Data (VPD) */
 931	struct ibmvnic_vpd *vpd;
 932	char fw_version[32];
 933
 934	/* Statistics */
 935	struct ibmvnic_statistics stats;
 936	dma_addr_t stats_token;
 937	struct completion stats_done;
 938	int replenish_no_mem;
 939	int replenish_add_buff_success;
 940	int replenish_add_buff_failure;
 941	int replenish_task_cycles;
 942	int tx_send_failed;
 943	int tx_map_failed;
 944
 945	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
 946	struct ibmvnic_rx_queue_stats *rx_stats_buffers;
 947
 948	int phys_link_state;
 949	int logical_link_state;
 950
 951	u32 speed;
 952	u8 duplex;
 953
 954	/* login data */
 955	struct ibmvnic_login_buffer *login_buf;
 956	dma_addr_t login_buf_token;
 957	int login_buf_sz;
 958
 959	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
 960	dma_addr_t login_rsp_buf_token;
 961	int login_rsp_buf_sz;
 962
 963	atomic_t running_cap_crqs;
 964
 965	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
 966	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
 967
 968	/* rx structs */
 969	struct napi_struct *napi;
 970	struct ibmvnic_rx_pool *rx_pool;
 971	u64 promisc;
 972
 973	struct ibmvnic_tx_pool *tx_pool;
 974	struct ibmvnic_tx_pool *tso_pool;
 975	struct completion probe_done;
 976	struct completion init_done;
 977	int init_done_rc;
 978
 979	struct completion fw_done;
 980	/* Used for serialization of device commands */
 981	struct mutex fw_lock;
 982	int fw_done_rc;
 983
 984	struct completion reset_done;
 985	int reset_done_rc;
 986	bool wait_for_reset;
 987
 988	/* CPU hotplug instances for online & dead */
 989	struct hlist_node node;
 990	struct hlist_node node_dead;
 991
 992	/* partner capabilities */
 993	u64 min_tx_queues;
 994	u64 min_rx_queues;
 995	u64 min_rx_add_queues;
 996	u64 max_tx_queues;
 997	u64 max_rx_queues;
 998	u64 max_rx_add_queues;
 999	u64 req_tx_queues;
1000	u64 req_rx_queues;
1001	u64 req_rx_add_queues;
1002	u64 min_tx_entries_per_subcrq;
1003	u64 min_rx_add_entries_per_subcrq;
1004	u64 max_tx_entries_per_subcrq;
1005	u64 max_rx_add_entries_per_subcrq;
1006	u64 req_tx_entries_per_subcrq;
1007	u64 req_rx_add_entries_per_subcrq;
1008	u64 tcp_ip_offload;
1009	u64 promisc_requested;
1010	u64 promisc_supported;
1011	u64 min_mtu;
1012	u64 max_mtu;
1013	u64 req_mtu;
1014	u64 prev_mtu;
1015	u64 max_multicast_filters;
1016	u64 vlan_header_insertion;
1017	u64 rx_vlan_header_insertion;
1018	u64 max_tx_sg_entries;
1019	u64 rx_sg_supported;
1020	u64 rx_sg_requested;
1021	u64 opt_tx_comp_sub_queues;
1022	u64 opt_rx_comp_queues;
1023	u64 opt_rx_bufadd_q_per_rx_comp_q;
1024	u64 opt_tx_entries_per_subcrq;
1025	u64 opt_rxba_entries_per_subcrq;
1026	__be64 tx_rx_desc_req;
1027#define MAX_MAP_ID	255
1028	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
1029	u32 num_active_rx_scrqs;
1030	u32 num_active_rx_pools;
1031	u32 num_active_rx_napi;
1032	u32 num_active_tx_scrqs;
1033	u32 num_active_tx_pools;
1034
1035	u32 prev_rx_pool_size;
1036	u32 prev_tx_pool_size;
1037	u32 cur_rx_buf_sz;
1038	u32 prev_rx_buf_sz;
1039
1040	struct tasklet_struct tasklet;
1041	enum vnic_state state;
1042	/* Used for serialization of state field. When taking both state
1043	 * and rwi locks, take state lock first.
1044	 */
1045	spinlock_t state_lock;
1046	enum ibmvnic_reset_reason reset_reason;
1047	struct list_head rwi_list;
1048	/* Used for serialization of rwi_list. When taking both state
1049	 * and rwi locks, take state lock first
1050	 */
1051	spinlock_t rwi_lock;
1052	struct work_struct ibmvnic_reset;
1053	struct delayed_work ibmvnic_delayed_reset;
1054	unsigned long resetting;
1055	/* last device reset time */
1056	unsigned long last_reset_time;
1057
1058	bool napi_enabled;
1059	bool from_passive_init;
1060	bool login_pending;
1061	/* protected by rcu */
1062	bool tx_queues_active;
1063	bool failover_pending;
1064	bool force_reset_recovery;
1065
1066	struct ibmvnic_tunables desired;
1067	struct ibmvnic_tunables fallback;
1068};
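
/* map_ids (in struct ibmvnic_adapter above) tracks which of the at most
 * MAX_MAP_ID long term buffer map ids are in use. Sketch of allocating
 * one for a new LTB (illustrative; assumes the caller serializes access
 * to the bitmap):
 *
 *	map_id = find_first_zero_bit(adapter->map_ids, MAX_MAP_ID);
 *	if (map_id == MAX_MAP_ID)
 *		return -ENOSPC;
 *	bitmap_set(adapter->map_ids, map_id, 1);
 *	ltb->map_id = map_id;
 */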