Linux kernel v6.8 — drivers/net/ethernet/ibm/ibmvnic.h (web capture; unrelated page chrome removed)
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/**************************************************************************/
   3/*                                                                        */
   4/*  IBM System i and System p Virtual NIC Device Driver                   */
   5/*  Copyright (C) 2014 IBM Corp.                                          */
   6/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   7/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   8/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   9/*                                                                        */
  10/*                                                                        */
  11/* This module contains the implementation of a virtual ethernet device   */
  12/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
  13/* option of the RS/6000 Platform Architecture to interface with virtual */
  14/* ethernet NICs that are presented to the partition by the hypervisor.   */
  15/*                                                                        */
  16/**************************************************************************/
  17
#define IBMVNIC_NAME		"ibmvnic"
#define IBMVNIC_DRIVER_VERSION	"1.0.1"
/* Sentinel for an LTB map id that has not been assigned yet */
#define IBMVNIC_INVALID_MAP	-1
#define IBMVNIC_OPEN_FAILED	3

/* basic structures plus 100 2k buffers */
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305

/* Initial module_parameters */
#define IBMVNIC_RX_WEIGHT		16
/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
#define IBMVNIC_BUFFS_PER_POOL	100
#define IBMVNIC_MAX_QUEUES	16
#define IBMVNIC_MAX_QUEUE_SZ   4096
#define IBMVNIC_MAX_IND_DESCS  16
/* indirect-descriptor array: each sub-CRQ descriptor is 32 bytes */
#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)

#define IBMVNIC_TSO_BUF_SZ	65536
#define IBMVNIC_TSO_BUFS	64
/* High bit of a buffer correlator/index marks it as a TSO-pool buffer */
#define IBMVNIC_TSO_POOL_MASK	0x80000000

/* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool
 * has a set of buffers. The size of each buffer is determined by the MTU.
 *
 * Each Rx/Tx pool is also associated with a DMA region that is shared
 * with the "hardware" (VIOS) and used to send/receive packets. The DMA
 * region is also referred to as a Long Term Buffer or LTB.
 *
 * The size of the DMA region required for an Rx/Tx pool depends on the
 * number and size (MTU) of the buffers in the pool. At the max levels
 * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
 * some padding.
 *
 * But the size of a single DMA region is limited by MAX_PAGE_ORDER in the
 * kernel (about 16MB currently).  To support say 4K Jumbo frames, we
 * use a set of LTBs (struct ltb_set) per pool.
 *
 * IBMVNIC_ONE_LTB_MAX  - max size of each LTB supported by kernel
 * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
 * (must be <= IBMVNIC_ONE_LTB_MAX)
 * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
 *
 * Each VNIC can have upto 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
 * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
 *
 * The Rx and Tx pools can have upto 4096 buffers. The max size of these
 * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
 * So, setting the IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
 *
 * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
 * the allocation of the LTB can fail when system is low in memory. If
 * its too small, we would need several mappings for each of the Rx/
 * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
 * VNIC protocol.
 *
 * So setting IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
 * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
 * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
 * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
 */
#define IBMVNIC_ONE_LTB_MAX	((u32)((1 << MAX_PAGE_ORDER) * PAGE_SIZE))
#define IBMVNIC_ONE_LTB_SIZE	min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
#define IBMVNIC_LTB_SET_SIZE	(38 << 20)

/* extra headroom reserved per buffer on top of the MTU */
#define IBMVNIC_BUFFER_HLEN		500
/* NOTE(review): units look like milliseconds — confirm at the call sites */
#define IBMVNIC_RESET_DELAY 100
  84
/* Login request buffer.  DMA-mapped by the driver and handed to firmware by
 * IOBA via the LOGIN CRQ; lists the tx/rx sub-CRQs being registered and
 * where the response buffer lives.  All multi-byte fields are big-endian
 * (__be32) and the struct is __packed so it matches the wire layout exactly.
 */
struct ibmvnic_login_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;
	__be32 off_txcomp_subcrqs;	/* byte offset of the tx sub-CRQ list */
	__be32 num_rxcomp_subcrqs;
	__be32 off_rxcomp_subcrqs;	/* byte offset of the rx sub-CRQ list */
	__be32 login_rsp_ioba;		/* where firmware writes the response */
	__be32 login_rsp_len;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);

/* Login response buffer, filled in by firmware at login_rsp_ioba above. */
struct ibmvnic_login_rsp_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;
	__be32 off_supp_tx_desc;
} __packed __aligned(8);

/* Buffer filled in by firmware for QUERY_IP_OFFLOAD: which checksum and
 * large-send/receive offloads the device supports, plus associated limits.
 */
struct ibmvnic_query_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1	/* redefined identically for the other IOB buffers below */
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved1[14];
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 reserved2[16];
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;	/* must driver precompute TCP pseudo-header sum? */
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	u8 reserved3[30];
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
	u8 reserved4[154];
} __packed __aligned(8);

/* Buffer written by the driver and sent with CONTROL_IP_OFFLOAD to enable
 * the subset of offloads it actually wants to use.
 */
struct ibmvnic_control_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 bad_packet_rx;
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved4[111];
} __packed __aligned(8);

/* Firmware trace facility (COLLECT_FW_TRACE): per-component description. */
struct ibmvnic_fw_component {
	u8 name[48];
	__be32 trace_buff_size;
	u8 correlator;
	u8 trace_level;
	u8 parent_correlator;
	u8 error_check_level;
	u8 trace_on;
	u8 reserved[7];
	u8 description[192];
} __packed __aligned(8);

/* One entry in a firmware trace buffer. */
struct ibmvnic_fw_trace_entry {
	__be32 trace_id;
	u8 num_valid_data;	/* number of valid trace_data[] slots */
	u8 reserved[3];
	__be64 pmc_registers;
	__be64 timebase;
	__be64 trace_data[5];
} __packed __aligned(8);

/* Device statistics buffer returned for REQUEST_STATISTICS (big-endian). */
struct ibmvnic_statistics {
	__be32 version;
	__be32 promiscuous;
	__be64 rx_packets;
	__be64 rx_bytes;
	__be64 tx_packets;
	__be64 tx_bytes;
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 align_errors;
	__be64 fcs_errors;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 deferred_tx;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
	u8 reserved[72];
} __packed __aligned(8);

/* Per-tx-queue counters kept by the driver (host-endian, not a wire format).
 * NUM_TX_STATS must match the number of u64 fields below.
 */
#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 dropped_packets;
};

/* Per-rx-queue counters; NUM_RX_STATS must match the field count. */
#define NUM_RX_STATS 3
struct ibmvnic_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 interrupts;
};

/* Access-control list buffer exchanged for ACL_QUERY: restricted MAC
 * addresses and VLAN ids, located by offset within this buffer.
 */
struct ibmvnic_acl_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;
	u8 vlan_acls_restrict;
	u8 reserved1[22];
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 num_vlan_ids;
	__be32 offset_vlan_ids;
	u8 reserved2[80];
} __packed __aligned(8);
 241
/* Sub-CRQ descriptor formats.  Each descriptor is 32 bytes; the first two
 * bytes identify the descriptor type (IBMVNIC_*_DESC below).
 */

/* descriptors have been changed, how should this be defined?  1? 4? */

#define IBMVNIC_TX_DESC_VERSIONS 3

/* is this still needed? */
struct ibmvnic_tx_comp_desc {
	u8 first;
	u8 num_comps;		/* how many of rcs[]/correlators[] are valid */
	__be16 rcs[5];		/* per-completion return codes */
	__be32 correlators[5];	/* match completions back to tx buffers */
} __packed __aligned(8);

/* some flags that included in v0 descriptor, which is gone
 * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM
 * and only in some offload_flags variable that doesn't seem
 * to be used anywhere, can probably be removed?
 */

#define IBMVNIC_TCP_CHKSUM		0x20
#define IBMVNIC_UDP_CHKSUM		0x08

/* Transmit descriptor (v1): describes one frame or the first element of a
 * multi-element (scatter-gather / LSO) submission.
 */
struct ibmvnic_tx_desc {
	u8 first;
	u8 type;

#define IBMVNIC_TX_DESC 0x10
	u8 n_crq_elem;		/* total CRQ elements in this submission */
	u8 n_sge;		/* number of scatter-gather entries */
	u8 flags1;
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
	u8 flags2;
#define IBMVNIC_TX_VLAN_INSERT		0x80
	__be16 mss;
	u8 reserved[4];
	__be32 correlator;	/* echoed back in the tx completion */
	__be16 vlan_id;
	__be16 dma_reg;		/* LTB map id the ioba refers to */
	__be32 sge_len;
	__be64 ioba;		/* I/O bus address of the frame data */
} __packed __aligned(8);

/* Packet-header descriptor: carries up to 24 bytes of header data. */
struct ibmvnic_hdr_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_DESC		0x11
	u8 len;
	u8 l2_len;
	__be16 l3_len;
	u8 l4_len;
	u8 flag;
	u8 data[24];
} __packed __aligned(8);

/* Header-extension descriptor: continuation of header data (29 bytes). */
struct ibmvnic_hdr_ext_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_EXT_DESC		0x12
	u8 len;
	u8 data[29];
} __packed __aligned(8);

/* Scatter-gather descriptor: two (dma_reg, len, ioba) entries. */
struct ibmvnic_sge_desc {
	u8 first;
	u8 type;
#define IBMVNIC_SGE_DESC		0x30
	__be16 sge1_dma_reg;
	__be32 sge1_len;
	__be64 sge1_ioba;
	__be16 reserved;
	__be16 sge2_dma_reg;
	__be32 sge2_len;
	__be64 sge2_ioba;
} __packed __aligned(8);

/* Receive completion: one received frame within a posted rx buffer. */
struct ibmvnic_rx_comp_desc {
	u8 first;
	u8 flags;
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME			0x20
#define IBMVNIC_EXACT_MC			0x10
#define IBMVNIC_VLAN_STRIPPED			0x08
	__be16 off_frame_data;	/* offset of frame within the buffer */
	__be32 len;
	__be64 correlator;	/* matches the rx_buff_add_desc correlator */
	__be16 vlan_tci;
	__be16 rc;
	u8 reserved[12];
} __packed __aligned(8);

/* Generic 32-byte sub-CRQ element, used before the type is decoded. */
struct ibmvnic_generic_scrq {
	u8 first;
	u8 reserved[31];
} __packed __aligned(8);

/* Posts one empty receive buffer to the device. */
struct ibmvnic_rx_buff_add_desc {
	u8 first;
	u8 reserved[7];
	__be64 correlator;
	__be32 ioba;
	u8 map_id;
	__be32 len:24;		/* 24-bit length bitfield, per protocol layout */
	u8 reserved2[8];
} __packed __aligned(8);
 353
/* Return code carried at the tail of most CRQ responses. */
struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
	u8 detailed_data[3];
} __packed __aligned(4);

/* Generic 16-byte CRQ element: type byte, command byte, payload, rc. */
struct ibmvnic_generic_crq {
	u8 first;
	u8 cmd;
	u8 params[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* VERSION_EXCHANGE / VERSION_EXCHANGE_RSP: negotiate protocol version. */
struct ibmvnic_version_exchange {
	u8 first;
	u8 cmd;
	__be16 version;
#define IBMVNIC_INITIAL_VERSION 1
	u8 reserved[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* QUERY/REQUEST_CAPABILITY and their responses: one (capability, number)
 * pair per CRQ.
 */
struct ibmvnic_capability {
	u8 first;
	u8 cmd;
	__be16 capability; /* one of ibmvnic_capabilities */
	__be64 number;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* LOGIN: points firmware at the DMA-mapped ibmvnic_login_buffer. */
struct ibmvnic_login {
	u8 first;
	u8 cmd;
	u8 reserved[6];
	__be32 ioba;
	__be32 len;
} __packed __aligned(8);

/* QUERY/SET_PHYS_PARMS and QUERY_PHYS_CAPABILITIES: link speed, duplex
 * and loopback settings.
 */
struct ibmvnic_phys_parms {
	u8 first;
	u8 cmd;
	u8 flags1;
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC		0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX	0x08
#define IBMVNIC_FULL_DUPLEX	0x04
#define IBMVNIC_HALF_DUPLEX	0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
	u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
	__be32 speed;		/* bitmask of the IBMVNIC_*BPS values below */
#define IBMVNIC_AUTONEG		0x80000000
#define IBMVNIC_10MBPS		0x40000000
#define IBMVNIC_100MBPS		0x20000000
#define IBMVNIC_1GBPS		0x10000000
#define IBMVNIC_10GBPS		0x08000000
#define IBMVNIC_40GBPS		0x04000000
#define IBMVNIC_100GBPS		0x02000000
#define IBMVNIC_25GBPS		0x01000000
#define IBMVNIC_50GBPS		0x00800000
#define IBMVNIC_200GBPS		0x00400000
	__be32 mtu;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* LOGICAL_LINK_STATE: bring the logical link up/down or query it. */
struct ibmvnic_logical_link_state {
	u8 first;
	u8 cmd;
	u8 link_state;
#define IBMVNIC_LOGICAL_LNK_DN 0x00
#define IBMVNIC_LOGICAL_LNK_UP 0x01
#define IBMVNIC_LOGICAL_LNK_QUERY 0xff
	u8 reserved[9];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* QUERY_IP_OFFLOAD: firmware fills the buffer at (ioba, len). */
struct ibmvnic_query_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 len;
	__be32 ioba;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* CONTROL_IP_OFFLOAD: driver supplies the control buffer at (ioba, len).
 * Note the (ioba, len) field order differs from the query variant above.
 */
struct ibmvnic_control_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 ioba;
	__be32 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* REQUEST_STATISTICS: firmware writes ibmvnic_statistics at (ioba, len). */
struct ibmvnic_request_statistics {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_PHYSICAL_PORT	0x80
	u8 reserved1;
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

/* ERROR_INDICATION: unsolicited error report from firmware. */
struct ibmvnic_error_indication {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_FATAL_ERROR	0x80
	u8 reserved1;
	__be32 error_id;
	__be32 detail_error_sz;
	__be16 error_cause;	/* one of enum ibmvnic_error_cause */
	u8 reserved2[2];
} __packed __aligned(8);

/* LINK_STATE_INDICATION: unsolicited link state change notification. */
struct ibmvnic_link_state_indication {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	u8 phys_link_state;
	u8 logical_link_state;
	u8 reserved2[10];
} __packed __aligned(8);

/* CHANGE_MAC_ADDR and its response. */
struct ibmvnic_change_mac_addr {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 reserved[4];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* MULTICAST_CTRL: enable/disable one multicast address or all of them. */
struct ibmvnic_multicast_ctrl {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 flags;
#define IBMVNIC_ENABLE_MC		0x80
#define IBMVNIC_DISABLE_MC		0x40
#define IBMVNIC_ENABLE_ALL		0x20
#define IBMVNIC_DISABLE_ALL	0x10
	u8 reserved1;
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* GET_VPD_SIZE: ask how large the Vital Product Data buffer must be. */
struct ibmvnic_get_vpd_size {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_get_vpd_size_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be64 len;	/* required VPD buffer length */
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* GET_VPD: firmware writes the VPD into the buffer at (ioba, len). */
struct ibmvnic_get_vpd {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);

struct ibmvnic_get_vpd_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* ACL_CHANGE_INDICATION: firmware notifies that MAC or VLAN ACLs changed. */
struct ibmvnic_acl_change_indication {
	u8 first;
	u8 cmd;
	__be16 change_type;
#define IBMVNIC_MAC_ACL 0
#define IBMVNIC_VLAN_ACL 1
	u8 reserved[12];
} __packed __aligned(8);

/* ACL_QUERY: firmware fills an ibmvnic_acl_buffer at (ioba, len). */
struct ibmvnic_acl_query {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

/* TUNE: pass an opaque tuning buffer at (ioba, len) to firmware. */
struct ibmvnic_tune {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

/* REQUEST_MAP: register a DMA region (LTB) with firmware under map_id. */
struct ibmvnic_request_map {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);

struct ibmvnic_request_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* REQUEST_UNMAP: release a previously registered map_id. */
struct ibmvnic_request_unmap {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[12];
} __packed __aligned(8);

struct ibmvnic_request_unmap_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);

/* QUERY_MAP: ask firmware about mapping capacity/usage. */
struct ibmvnic_query_map {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);

struct ibmvnic_query_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved;
	u8 page_size;
	__be32 tot_pages;
	__be32 free_pages;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 611
/* All CRQ command/response formats overlaid on one 16-byte element.
 * The 'first'/'cmd' bytes (common to every member) select which view of
 * the union is valid.
 */
union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};
 658
/* Return codes carried in ibmvnic_rc.code. */
enum ibmvnic_rc_codes {
	SUCCESS = 0,
	PARTIALSUCCESS = 1,
	PERMISSION = 2,
	NOMEMORY = 3,
	PARAMETER = 4,
	UNKNOWNCOMMAND = 5,
	ABORTED = 6,
	INVALIDSTATE = 7,
	INVALIDIOBA = 8,
	INVALIDLENGTH = 9,
	UNSUPPORTEDOPTION = 10,
};

/* Capability ids for ibmvnic_capability.capability.  MIN_*/MAX_* report
 * device limits; REQ_* carry the values the driver requests.
 */
enum ibmvnic_capabilities {
	MIN_TX_QUEUES = 1,
	MIN_RX_QUEUES = 2,
	MIN_RX_ADD_QUEUES = 3,
	MAX_TX_QUEUES = 4,
	MAX_RX_QUEUES = 5,
	MAX_RX_ADD_QUEUES = 6,
	REQ_TX_QUEUES = 7,
	REQ_RX_QUEUES = 8,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	TCP_IP_OFFLOAD = 16,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MIN_MTU = 19,
	MAX_MTU = 20,
	REQ_MTU = 21,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
	TX_RX_DESC_REQ = 33,
};

/* Values for ibmvnic_error_indication.error_cause. */
enum ibmvnic_error_cause {
	ADAPTER_PROBLEM = 0,
	BUS_PROBLEM = 1,
	FW_PROBLEM = 2,
	DD_PROBLEM = 3,
	EEH_RECOVERY = 4,
	FW_UPDATED = 5,
	LOW_MEMORY = 6,
};

/* CRQ command opcodes.  A response opcode is its request with the high
 * bit set (e.g. LOGIN 0x04 -> LOGIN_RSP 0x84).
 */
enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	LOGIN = 0x04,
	LOGIN_RSP = 0x84,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE = 0x15,
	GET_VPD_SIZE_RSP = 0x95,
	GET_VPD = 0x16,
	GET_VPD_RSP = 0x96,
	TUNE = 0x17,
	TUNE_RSP = 0x97,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY = 0x1B,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP = 0x1D,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP = 0x1E,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL = 0x20,
	VLAN_CTRL_RSP = 0xA0,
};

/* Values for the 'first' byte of a CRQ element.  CMD/CMD_RSP and
 * INIT_CMD/INIT_RSP intentionally share values; direction disambiguates.
 */
enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD			= 0x80,
	IBMVNIC_CRQ_CMD_RSP		= 0x80,
	IBMVNIC_CRQ_INIT_CMD		= 0xC0,
	IBMVNIC_CRQ_INIT_RSP		= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT		= 0xFF,
};

/* Second byte of INIT/transport-event CRQ elements.
 * NOTE(review): the "ibmvfc" prefix looks like a copy/paste from the
 * ibmvfc driver; kept as-is since callers reference this name.
 */
enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT                 = 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE        = 0x02,
	IBMVNIC_PARTITION_MIGRATED       = 0x06,
	IBMVNIC_DEVICE_FAILOVER          = 0x08,
};
 783
/* Main CRQ ring shared with the hypervisor: DMA-mapped array of 16-byte
 * elements, consumed at 'cur' and wrapping at 'size'.
 */
struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;	/* DMA address of msgs[] */
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;
	char name[32];
};

/* One 32-byte sub-CRQ element, viewed as any of the descriptor formats. */
union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};

/* Staging array for batched (indirect) sub-CRQ submission. */
struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;
	dma_addr_t indir_dma;
	int index;		/* next free slot in indir_arr */
};

/* One tx or rx sub-CRQ: ring memory, its interrupt, and bookkeeping. */
struct ibmvnic_sub_crq_queue {
	union sub_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	unsigned long crq_num;	/* hypervisor handle for this sub-CRQ */
	unsigned long hw_irq;
	unsigned int irq;
	unsigned int pool_index;
	int scrq_num;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;	/* partially assembled rx frame */
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	atomic_t used;		/* in-flight descriptors on this queue */
	char name[32];
	u64 handle;
	cpumask_var_t affinity_mask;
} ____cacheline_aligned;

/* One DMA region registered with firmware (REQUEST_MAP) under map_id. */
struct ibmvnic_long_term_buff {
	unsigned char *buff;
	dma_addr_t addr;
	u64 size;
	u8 map_id;
};

/* A pool's backing storage: one or more LTBs (see sizing comment above). */
struct ibmvnic_ltb_set {
	int num_ltbs;
	struct ibmvnic_long_term_buff *ltbs;
};

/* Driver-side state for one in-flight tx buffer. */
struct ibmvnic_tx_buff {
	struct sk_buff *skb;
	int index;
	int pool_index;
	int num_entries;	/* sub-CRQ elements used by this skb */
};

/* Per-queue transmit pool: buffers, free list, and backing LTB set. */
struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	int *free_map;		/* stack of free tx_buff indices */
	int consumer_index;
	int producer_index;
	struct ibmvnic_ltb_set ltb_set;
	int num_buffers;
	int buf_size;
} ____cacheline_aligned;

/* Driver-side state for one posted rx buffer. */
struct ibmvnic_rx_buff {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned char *data;
	int size;
	int pool_index;
};

/* Per-queue receive pool: posted buffers, free list, and backing LTB set. */
struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size;			/* # of buffers in the pool */
	int index;
	int buff_size;
	atomic_t available;	/* buffers currently posted to the device */
	int *free_map;
	int next_free;
	int next_alloc;
	int active;
	struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;

/* DMA buffer holding the adapter's Vital Product Data. */
struct ibmvnic_vpd {
	unsigned char *buff;
	dma_addr_t dma_addr;
	u64 len;
};

/* Driver lifecycle state (adapter->state, guarded by state_lock). */
enum vnic_state {VNIC_PROBING = 1,
		 VNIC_PROBED,
		 VNIC_OPENING,
		 VNIC_OPEN,
		 VNIC_CLOSING,
		 VNIC_CLOSED,
		 VNIC_REMOVING,
		 VNIC_REMOVED,
		 VNIC_DOWN};

/* Why a reset was scheduled (stored in struct ibmvnic_rwi). */
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM,
			   VNIC_RESET_PASSIVE_INIT};

/* One queued reset work item (Reset Work Item), linked on rwi_list. */
struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};

/* User/driver-requested queue and MTU settings (desired vs fallback). */
struct ibmvnic_tunables {
	u64 rx_queues;
	u64 tx_queues;
	u64 rx_entries;
	u64 tx_entries;
	u64 mtu;
};
 917
/* Per-device state for one vNIC instance, allocated with the net_device. */
struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;
	u32 msg_enable;

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;
	char fw_version[32];

	/* Statistics */
	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;
	int tx_send_failed;
	int tx_map_failed;

	/* per-queue counter arrays, one entry per tx/rx queue */
	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int phys_link_state;
	int logical_link_state;

	u32 speed;
	u8 duplex;

	/* login data */
	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;
	int login_buf_sz;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	/* outstanding QUERY/REQUEST_CAPABILITY CRQs awaiting responses */
	atomic_t running_cap_crqs;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	/* rx structs */
	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	u64 promisc;

	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;
	struct completion probe_done;
	struct completion init_done;
	int init_done_rc;

	struct completion fw_done;
	/* Used for serialization of device commands */
	struct mutex fw_lock;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* partner capabilities (values negotiated via *_CAPABILITY CRQs) */
	u64 min_tx_queues;
	u64 min_rx_queues;
	u64 min_rx_add_queues;
	u64 max_tx_queues;
	u64 max_rx_queues;
	u64 max_rx_add_queues;
	u64 req_tx_queues;
	u64 req_rx_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 tcp_ip_offload;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 min_mtu;
	u64 max_mtu;
	u64 req_mtu;
	u64 prev_mtu;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
/* protocol limit: at most 255 LTB mappings per vnic (see comment above) */
#define MAX_MAP_ID	255
	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;

	/* previous/current sizes, used to decide whether pools need rebuild */
	u32 prev_rx_pool_size;
	u32 prev_tx_pool_size;
	u32 cur_rx_buf_sz;
	u32 prev_rx_buf_sz;

	struct tasklet_struct tasklet;
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first
	 */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
	/* last device reset time */
	unsigned long last_reset_time;

	bool napi_enabled;
	bool from_passive_init;
	bool login_pending;
	/* protected by rcu */
	bool tx_queues_active;
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};
v6.2 — older revision of the same header follows (captured incompletely; truncated below)
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/**************************************************************************/
   3/*                                                                        */
   4/*  IBM System i and System p Virtual NIC Device Driver                   */
   5/*  Copyright (C) 2014 IBM Corp.                                          */
   6/*  Santiago Leon (santi_leon@yahoo.com)                                  */
   7/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
   8/*  John Allen (jallen@linux.vnet.ibm.com)                                */
   9/*                                                                        */
  10/*                                                                        */
  11/* This module contains the implementation of a virtual ethernet device   */
  12/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
  13/* option of the RS/6000 Platform Architecture to interface with virtual */
  14/* ethernet NICs that are presented to the partition by the hypervisor.   */
  15/*                                                                        */
  16/**************************************************************************/
  17
  18#define IBMVNIC_NAME		"ibmvnic"
  19#define IBMVNIC_DRIVER_VERSION	"1.0.1"
  20#define IBMVNIC_INVALID_MAP	-1
  21#define IBMVNIC_OPEN_FAILED	3
  22
  23/* basic structures plus 100 2k buffers */
  24#define IBMVNIC_IO_ENTITLEMENT_DEFAULT	610305
  25
  26/* Initial module_parameters */
  27#define IBMVNIC_RX_WEIGHT		16
  28/* when changing this, update IBMVNIC_IO_ENTITLEMENT_DEFAULT */
  29#define IBMVNIC_BUFFS_PER_POOL	100
  30#define IBMVNIC_MAX_QUEUES	16
  31#define IBMVNIC_MAX_QUEUE_SZ   4096
  32#define IBMVNIC_MAX_IND_DESCS  16
  33#define IBMVNIC_IND_ARR_SZ	(IBMVNIC_MAX_IND_DESCS * 32)
  34
  35#define IBMVNIC_TSO_BUF_SZ	65536
  36#define IBMVNIC_TSO_BUFS	64
  37#define IBMVNIC_TSO_POOL_MASK	0x80000000
  38
  39/* A VNIC adapter has set of Rx and Tx pools (aka queues). Each Rx/Tx pool
  40 * has a set of buffers. The size of each buffer is determined by the MTU.
  41 *
  42 * Each Rx/Tx pool is also associated with a DMA region that is shared
  43 * with the "hardware" (VIOS) and used to send/receive packets. The DMA
  44 * region is also referred to as a Long Term Buffer or LTB.
  45 *
  46 * The size of the DMA region required for an Rx/Tx pool depends on the
  47 * number and size (MTU) of the buffers in the pool. At the max levels
  48 * of 4096 jumbo frames (MTU=9000) we will need about 9K*4K = 36MB plus
  49 * some padding.
  50 *
  51 * But the size of a single DMA region is limited by MAX_ORDER in the
  52 * kernel (about 16MB currently).  To support say 4K Jumbo frames, we
  53 * use a set of LTBs (struct ltb_set) per pool.
  54 *
  55 * IBMVNIC_ONE_LTB_MAX  - max size of each LTB supported by kernel
  56 * IBMVNIC_ONE_LTB_SIZE - current max size of each LTB in an ltb_set
  57 * (must be <= IBMVNIC_ONE_LTB_MAX)
  58 * IBMVNIC_LTB_SET_SIZE - current size of all LTBs in an ltb_set
  59 *
  60 * Each VNIC can have upto 16 Rx, 16 Tx and 16 TSO pools. The TSO pools
  61 * are of fixed length (IBMVNIC_TSO_BUF_SZ * IBMVNIC_TSO_BUFS) of 4MB.
  62 *
  63 * The Rx and Tx pools can have upto 4096 buffers. The max size of these
  64 * buffers is about 9588 (for jumbo frames, including IBMVNIC_BUFFER_HLEN).
  65 * So, setting the IBMVNIC_LTB_SET_SIZE for a pool to 4096 * 9588 ~= 38MB.
  66 *
  67 * There is a trade-off in setting IBMVNIC_ONE_LTB_SIZE. If it is large,
  68 * the allocation of the LTB can fail when system is low in memory. If
  69 * its too small, we would need several mappings for each of the Rx/
  70 * Tx/TSO pools but there is a limit of 255 mappings per vnic in the
  71 * VNIC protocol.
  72 *
  73 * So setting IBMVNIC_ONE_LTB_SIZE to 8MB. With IBMVNIC_LTB_SET_SIZE set
  74 * to 38MB, we will need 5 LTBs per Rx and Tx pool and 1 LTB per TSO
  75 * pool for the 4MB. Thus the 16 Rx and Tx queues require 32 * 5 = 160
  76 * plus 16 for the TSO pools for a total of 176 LTB mappings per VNIC.
  77 */
  78#define IBMVNIC_ONE_LTB_MAX	((u32)((1 << (MAX_ORDER - 1)) * PAGE_SIZE))
  79#define IBMVNIC_ONE_LTB_SIZE	min((u32)(8 << 20), IBMVNIC_ONE_LTB_MAX)
  80#define IBMVNIC_LTB_SET_SIZE	(38 << 20)
  81
  82#define IBMVNIC_BUFFER_HLEN		500
  83#define IBMVNIC_RESET_DELAY 100
  84
/* LOGIN request buffer: describes where the driver has placed its arrays
 * of tx-completion / rx-completion sub-CRQ counts and where the firmware
 * should DMA the login response. Multi-byte fields are big-endian.
 */
struct ibmvnic_login_buffer {
	__be32 len;		/* total length of this buffer */
	__be32 version;
#define INITIAL_VERSION_LB 1
	__be32 num_txcomp_subcrqs;
	__be32 off_txcomp_subcrqs;	/* offset of tx-comp array within buffer */
	__be32 num_rxcomp_subcrqs;
	__be32 off_rxcomp_subcrqs;	/* offset of rx-comp array within buffer */
	__be32 login_rsp_ioba;	/* I/O bus address for the response buffer */
	__be32 login_rsp_len;
	__be32 client_data_offset;
	__be32 client_data_len;
} __packed __aligned(8);
  98
/* LOGIN response buffer: offsets (within this buffer) of the arrays of
 * tx-submission / rx-add sub-CRQ handles, rx buffer sizes, and supported
 * tx descriptor versions returned by the firmware.
 */
struct ibmvnic_login_rsp_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_LRB 1
	__be32 num_txsubm_subcrqs;
	__be32 off_txsubm_subcrqs;
	__be32 num_rxadd_subcrqs;
	__be32 off_rxadd_subcrqs;
	__be32 off_rxadd_buff_size;
	__be32 num_supp_tx_desc;
	__be32 off_supp_tx_desc;
} __packed __aligned(8);
 111
/* QUERY_IP_OFFLOAD response: capability flags (u8 booleans) and limits
 * describing which checksum/LSO/LRO offloads the firmware supports.
 */
struct ibmvnic_query_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;	/* TSO support per IP version */
	u8 large_tx_ipv6;
	u8 large_rx_ipv4;	/* LRO support per IP version */
	u8 large_rx_ipv6;
	u8 reserved1[14];
	__be16 max_ipv4_header_size;
	__be16 max_ipv6_header_size;
	__be16 max_tcp_header_size;
	__be16 max_udp_header_size;
	__be32 max_large_tx_size;
	__be32 max_large_rx_size;
	u8 reserved2[16];
	u8 ipv6_extension_header;
#define IPV6_EH_NOT_SUPPORTED	0x00
#define IPV6_EH_SUPPORTED_LIM	0x01	/* supported with limitations */
#define IPV6_EH_SUPPORTED	0xFF
	u8 tcp_pseudosum_req;	/* must driver pre-compute TCP pseudo-header sum? */
#define TCP_PS_NOT_REQUIRED	0x00
#define TCP_PS_REQUIRED		0x01
	u8 reserved3[30];
	__be16 num_ipv6_ext_headers;
	__be32 off_ipv6_ext_headers;
	u8 reserved4[154];
} __packed __aligned(8);
 146
/* CONTROL_IP_OFFLOAD request: the subset of queried offloads the driver
 * wants enabled (u8 boolean per feature).
 * Note: INITIAL_VERSION_IOB is also defined (identically) for the query
 * and ACL buffers; identical redefinition is legal C.
 */
struct ibmvnic_control_ip_offload_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 ipv4_chksum;
	u8 ipv6_chksum;
	u8 tcp_ipv4_chksum;
	u8 tcp_ipv6_chksum;
	u8 udp_ipv4_chksum;
	u8 udp_ipv6_chksum;
	u8 large_tx_ipv4;
	u8 large_tx_ipv6;
	u8 bad_packet_rx;	/* deliver bad packets instead of dropping */
	u8 large_rx_ipv4;
	u8 large_rx_ipv6;
	u8 reserved4[111];
} __packed __aligned(8);
 164
/* Describes one firmware component for trace collection (name, trace
 * buffer size, trace/error-check levels, human-readable description).
 */
struct ibmvnic_fw_component {
	u8 name[48];
	__be32 trace_buff_size;
	u8 correlator;
	u8 trace_level;
	u8 parent_correlator;
	u8 error_check_level;
	u8 trace_on;
	u8 reserved[7];
	u8 description[192];
} __packed __aligned(8);
 176
/* One entry in a collected firmware trace buffer. */
struct ibmvnic_fw_trace_entry {
	__be32 trace_id;
	u8 num_valid_data;	/* number of valid trace_data[] words */
	u8 reserved[3];
	__be64 pmc_registers;
	__be64 timebase;
	__be64 trace_data[5];
} __packed __aligned(8);
 185
/* Statistics block DMA'd from the firmware in response to
 * REQUEST_STATISTICS; counters are big-endian 64-bit.
 */
struct ibmvnic_statistics {
	__be32 version;
	__be32 promiscuous;
	__be64 rx_packets;
	__be64 rx_bytes;
	__be64 tx_packets;
	__be64 tx_bytes;
	__be64 ucast_tx_packets;
	__be64 ucast_rx_packets;
	__be64 mcast_tx_packets;
	__be64 mcast_rx_packets;
	__be64 bcast_tx_packets;
	__be64 bcast_rx_packets;
	__be64 align_errors;
	__be64 fcs_errors;
	__be64 single_collision_frames;
	__be64 multi_collision_frames;
	__be64 sqe_test_errors;
	__be64 deferred_tx;
	__be64 late_collisions;
	__be64 excess_collisions;
	__be64 internal_mac_tx_errors;
	__be64 carrier_sense;
	__be64 too_long_frames;
	__be64 internal_mac_rx_errors;
	u8 reserved[72];
} __packed __aligned(8);
 213
/* Per-tx-queue software counters; NUM_TX_STATS must match the number of
 * u64 fields below (used when exporting per-queue ethtool stats).
 */
#define NUM_TX_STATS 3
struct ibmvnic_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 dropped_packets;
};
 220
/* Per-rx-queue software counters; NUM_RX_STATS must match the number of
 * u64 fields below.
 */
#define NUM_RX_STATS 3
struct ibmvnic_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 interrupts;
};
 227
/* ACL_QUERY response buffer: whether MAC/VLAN ACL restrictions are in
 * force, plus offsets/counts of the allowed MAC address and VLAN id
 * arrays within this buffer.
 */
struct ibmvnic_acl_buffer {
	__be32 len;
	__be32 version;
#define INITIAL_VERSION_IOB 1
	u8 mac_acls_restrict;
	u8 vlan_acls_restrict;
	u8 reserved1[22];
	__be32 num_mac_addrs;
	__be32 offset_mac_addrs;
	__be32 num_vlan_ids;
	__be32 offset_vlan_ids;
	u8 reserved2[80];
} __packed __aligned(8);
 241
 242/* descriptors have been changed, how should this be defined?  1? 4? */
 243
 244#define IBMVNIC_TX_DESC_VERSIONS 3
 245
 246/* is this still needed? */
/* Tx completion descriptor: up to 5 completed sends, each identified by
 * a correlator (matched against the driver's tx_buff index) with a
 * per-send return code in rcs[].
 */
struct ibmvnic_tx_comp_desc {
	u8 first;
	u8 num_comps;	/* number of valid rcs[]/correlators[] entries */
	__be16 rcs[5];
	__be32 correlators[5];
} __packed __aligned(8);
 253
 254/* some flags that included in v0 descriptor, which is gone
 255 * only used for IBMVNIC_TCP_CHKSUM and IBMVNIC_UDP_CHKSUM
 256 * and only in some offload_flags variable that doesn't seem
 257 * to be used anywhere, can probably be removed?
 258 */
 259
 260#define IBMVNIC_TCP_CHKSUM		0x20
 261#define IBMVNIC_UDP_CHKSUM		0x08
 262
/* Version-1 tx submission descriptor (type IBMVNIC_TX_DESC). flags1
 * selects checksum/LSO offload and protocol; the single SGE (dma_reg /
 * sge_len / ioba) points at the frame in a long-term-mapped buffer.
 */
struct ibmvnic_tx_desc {
	u8 first;
	u8 type;

#define IBMVNIC_TX_DESC 0x10
	u8 n_crq_elem;	/* total CRQ elements in this submission */
	u8 n_sge;
	u8 flags1;
#define IBMVNIC_TX_COMP_NEEDED		0x80
#define IBMVNIC_TX_CHKSUM_OFFLOAD	0x40
#define IBMVNIC_TX_LSO			0x20
#define IBMVNIC_TX_PROT_TCP		0x10
#define IBMVNIC_TX_PROT_UDP		0x08
#define IBMVNIC_TX_PROT_IPV4		0x04
#define IBMVNIC_TX_PROT_IPV6		0x02
#define IBMVNIC_TX_VLAN_PRESENT		0x01
	u8 flags2;
#define IBMVNIC_TX_VLAN_INSERT		0x80
	__be16 mss;		/* for LSO */
	u8 reserved[4];
	__be32 correlator;	/* echoed back in the tx completion */
	__be16 vlan_id;
	__be16 dma_reg;		/* LTB map id of the buffer */
	__be32 sge_len;
	__be64 ioba;
} __packed __aligned(8);
 289
/* Inline packet-header descriptor (type IBMVNIC_HDR_DESC): carries L2/L3/L4
 * header lengths plus the first 24 bytes of header data inline.
 */
struct ibmvnic_hdr_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_DESC		0x11
	u8 len;
	u8 l2_len;
	__be16 l3_len;
	u8 l4_len;
	u8 flag;
	u8 data[24];
} __packed __aligned(8);
 301
/* Continuation of inline header data when it does not fit in
 * ibmvnic_hdr_desc (type IBMVNIC_HDR_EXT_DESC); 29 more bytes.
 */
struct ibmvnic_hdr_ext_desc {
	u8 first;
	u8 type;
#define IBMVNIC_HDR_EXT_DESC		0x12
	u8 len;
	u8 data[29];
} __packed __aligned(8);
 309
/* Scatter-gather descriptor (type IBMVNIC_SGE_DESC): up to two extra
 * SGEs per descriptor, each an (LTB map id, length, I/O bus address)
 * triple.
 */
struct ibmvnic_sge_desc {
	u8 first;
	u8 type;
#define IBMVNIC_SGE_DESC		0x30
	__be16 sge1_dma_reg;
	__be32 sge1_len;
	__be64 sge1_ioba;
	__be16 reserved;
	__be16 sge2_dma_reg;
	__be32 sge2_len;
	__be64 sge2_ioba;
} __packed __aligned(8);
 322
/* Rx completion descriptor: identifies the posted buffer (correlator),
 * where the frame data starts within it (off_frame_data), its length,
 * and checksum/VLAN status flags.
 */
struct ibmvnic_rx_comp_desc {
	u8 first;
	u8 flags;
#define IBMVNIC_IP_CHKSUM_GOOD		0x80
#define IBMVNIC_TCP_UDP_CHKSUM_GOOD	0x40
#define IBMVNIC_END_FRAME			0x20
#define IBMVNIC_EXACT_MC			0x10
#define IBMVNIC_VLAN_STRIPPED			0x08
	__be16 off_frame_data;
	__be32 len;
	__be64 correlator;	/* matches the rx_buff_add_desc correlator */
	__be16 vlan_tci;
	__be16 rc;
	u8 reserved[12];
} __packed __aligned(8);
 338
/* Generic 32-byte sub-CRQ element; 'first' carries the valid/type byte
 * used to dispatch to the specific descriptor layout in union sub_crq.
 */
struct ibmvnic_generic_scrq {
	u8 first;
	u8 reserved[31];
} __packed __aligned(8);
 343
/* Descriptor posting an rx buffer to the firmware: correlator to hand
 * back on completion, buffer I/O address, LTB map id, and 24-bit length.
 * NOTE(review): 'len' is a 24-bit bitfield declared on a __be32; the bit
 * placement within the word is compiler/endianness dependent — layout is
 * presumably pinned by the ppc64 target this driver builds for; confirm
 * against the VNIC protocol spec before touching.
 */
struct ibmvnic_rx_buff_add_desc {
	u8 first;
	u8 reserved[7];
	__be64 correlator;
	__be32 ioba;
	u8 map_id;
	__be32 len:24;
	u8 reserved2[8];
} __packed __aligned(8);
 353
/* Return-code trailer shared by all CRQ responses. */
struct ibmvnic_rc {
	u8 code; /* one of enum ibmvnic_rc_codes */
	u8 detailed_data[3];
} __packed __aligned(4);
 358
/* Generic 16-byte CRQ message: type byte, command byte, 10 parameter
 * bytes, and the common return-code trailer.
 */
struct ibmvnic_generic_crq {
	u8 first;
	u8 cmd;
	u8 params[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 365
/* VERSION_EXCHANGE request/response: protocol version negotiation. */
struct ibmvnic_version_exchange {
	u8 first;
	u8 cmd;
	__be16 version;
#define IBMVNIC_INITIAL_VERSION 1
	u8 reserved[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 374
/* QUERY/REQUEST_CAPABILITY message: one capability id with its value. */
struct ibmvnic_capability {
	u8 first;
	u8 cmd;
	__be16 capability; /* one of ibmvnic_capabilities */
	__be64 number;	/* capability value (queried or requested) */
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 382
/* LOGIN CRQ: points the firmware at the DMA'd ibmvnic_login_buffer. */
struct ibmvnic_login {
	u8 first;
	u8 cmd;
	u8 reserved[6];
	__be32 ioba;
	__be32 len;
} __packed __aligned(8);
 390
/* QUERY/SET_PHYS_PARMS message: physical port state, duplex/loopback
 * flags, link speed (one-hot bits below) and MTU.
 */
struct ibmvnic_phys_parms {
	u8 first;
	u8 cmd;
	u8 flags1;
#define IBMVNIC_EXTERNAL_LOOPBACK	0x80
#define IBMVNIC_INTERNAL_LOOPBACK	0x40
#define IBMVNIC_PROMISC		0x20
#define IBMVNIC_PHYS_LINK_ACTIVE	0x10
#define IBMVNIC_AUTONEG_DUPLEX	0x08
#define IBMVNIC_FULL_DUPLEX	0x04
#define IBMVNIC_HALF_DUPLEX	0x02
#define IBMVNIC_CAN_CHG_PHYS_PARMS	0x01
	u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
	__be32 speed;
#define IBMVNIC_AUTONEG		0x80000000
#define IBMVNIC_10MBPS		0x40000000
#define IBMVNIC_100MBPS		0x20000000
#define IBMVNIC_1GBPS		0x10000000
#define IBMVNIC_10GBPS		0x08000000
#define IBMVNIC_40GBPS		0x04000000
#define IBMVNIC_100GBPS		0x02000000
#define IBMVNIC_25GBPS		0x01000000
#define IBMVNIC_50GBPS		0x00800000
#define IBMVNIC_200GBPS		0x00400000
	__be32 mtu;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 419
/* LOGICAL_LINK_STATE message: bring the logical link up/down or query it. */
struct ibmvnic_logical_link_state {
	u8 first;
	u8 cmd;
	u8 link_state;
#define IBMVNIC_LOGICAL_LNK_DN 0x00
#define IBMVNIC_LOGICAL_LNK_UP 0x01
#define IBMVNIC_LOGICAL_LNK_QUERY 0xff
	u8 reserved[9];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 430
/* QUERY_IP_OFFLOAD CRQ: asks the firmware to fill the DMA'd
 * ibmvnic_query_ip_offload_buffer at (ioba, len).
 * Note the len/ioba field order differs from the control variant below.
 */
struct ibmvnic_query_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 len;
	__be32 ioba;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 439
/* CONTROL_IP_OFFLOAD CRQ: points the firmware at the DMA'd
 * ibmvnic_control_ip_offload_buffer (ioba before len, unlike the query).
 */
struct ibmvnic_control_ip_offload {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be32 ioba;
	__be32 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 448
/* REQUEST_STATISTICS CRQ: ask firmware to DMA an ibmvnic_statistics
 * block to (ioba, len); flags selects physical- vs logical-port stats.
 */
struct ibmvnic_request_statistics {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_PHYSICAL_PORT	0x80
	u8 reserved1;
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);
 459
/* Unsolicited ERROR_INDICATION from firmware: error id, cause (enum
 * ibmvnic_error_cause) and size of any detail data; the fatal flag tells
 * the driver whether a reset is required.
 */
struct ibmvnic_error_indication {
	u8 first;
	u8 cmd;
	u8 flags;
#define IBMVNIC_FATAL_ERROR	0x80
	u8 reserved1;
	__be32 error_id;
	__be32 detail_error_sz;
	__be16 error_cause;
	u8 reserved2[2];
} __packed __aligned(8);
 471
/* Unsolicited LINK_STATE_INDICATION: current physical and logical link
 * state reported by the firmware.
 */
struct ibmvnic_link_state_indication {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	u8 phys_link_state;
	u8 logical_link_state;
	u8 reserved2[10];
} __packed __aligned(8);
 480
/* CHANGE_MAC_ADDR request/response carrying the 6-byte MAC address. */
struct ibmvnic_change_mac_addr {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 reserved[4];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 488
/* MULTICAST_CTRL message: enable/disable a single multicast MAC filter
 * or all-multicast mode via the flags bits.
 */
struct ibmvnic_multicast_ctrl {
	u8 first;
	u8 cmd;
	u8 mac_addr[6];
	u8 flags;
#define IBMVNIC_ENABLE_MC		0x80
#define IBMVNIC_DISABLE_MC		0x40
#define IBMVNIC_ENABLE_ALL		0x20
#define IBMVNIC_DISABLE_ALL	0x10
	u8 reserved1;
	__be16 reserved2; /* was num_enabled_mc_addr; */
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 502
/* GET_VPD_SIZE request: no payload beyond the command bytes. */
struct ibmvnic_get_vpd_size {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);
 508
/* GET_VPD_SIZE response: length of the Vital Product Data blob. */
struct ibmvnic_get_vpd_size_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[2];
	__be64 len;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 516
/* GET_VPD request: DMA target (ioba, len) for the VPD blob. */
struct ibmvnic_get_vpd {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved[4];
} __packed __aligned(8);
 525
/* GET_VPD response: return code only; data arrives via DMA. */
struct ibmvnic_get_vpd_rsp {
	u8 first;
	u8 cmd;
	u8 reserved[10];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 532
/* Unsolicited ACL_CHANGE_INDICATION: which ACL type (MAC or VLAN)
 * changed on the firmware side.
 */
struct ibmvnic_acl_change_indication {
	u8 first;
	u8 cmd;
	__be16 change_type;
#define IBMVNIC_MAC_ACL 0
#define IBMVNIC_VLAN_ACL 1
	u8 reserved[12];
} __packed __aligned(8);
 541
/* ACL_QUERY request: DMA target (ioba, len) for an ibmvnic_acl_buffer. */
struct ibmvnic_acl_query {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);
 550
/* TUNE request: DMA address/length of a tuning buffer. */
struct ibmvnic_tune {
	u8 first;
	u8 cmd;
	u8 reserved1[2];
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);
 559
/* REQUEST_MAP: register a long term buffer (map_id, ioba, len) with the
 * firmware so descriptors can reference it by map id.
 */
struct ibmvnic_request_map {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	__be32 ioba;
	__be32 len;
	u8 reserved2[4];
} __packed __aligned(8);
 569
/* REQUEST_MAP response: echoes the map id plus a return code. */
struct ibmvnic_request_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 578
/* REQUEST_UNMAP: release a previously registered long term buffer. */
struct ibmvnic_request_unmap {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[12];
} __packed __aligned(8);
 586
/* REQUEST_UNMAP response: echoes the map id plus a return code. */
struct ibmvnic_request_unmap_rsp {
	u8 first;
	u8 cmd;
	u8 reserved1;
	u8 map_id;
	u8 reserved2[8];
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 595
/* QUERY_MAP request: no payload beyond the command bytes. */
struct ibmvnic_query_map {
	u8 first;
	u8 cmd;
	u8 reserved[14];
} __packed __aligned(8);
 601
/* QUERY_MAP response: mapping page size and total/free page counts. */
struct ibmvnic_query_map_rsp {
	u8 first;
	u8 cmd;
	u8 reserved;
	u8 page_size;
	__be32 tot_pages;
	__be32 free_pages;
	struct ibmvnic_rc rc;
} __packed __aligned(8);
 611
/* Overlay of every 16-byte CRQ message format; the 'first'/'cmd' bytes
 * common to all members select which view is valid.
 */
union ibmvnic_crq {
	struct ibmvnic_generic_crq generic;
	struct ibmvnic_version_exchange version_exchange;
	struct ibmvnic_version_exchange version_exchange_rsp;
	struct ibmvnic_capability query_capability;
	struct ibmvnic_capability query_capability_rsp;
	struct ibmvnic_capability request_capability;
	struct ibmvnic_capability request_capability_rsp;
	struct ibmvnic_login login;
	struct ibmvnic_generic_crq login_rsp;
	struct ibmvnic_phys_parms query_phys_parms;
	struct ibmvnic_phys_parms query_phys_parms_rsp;
	struct ibmvnic_phys_parms query_phys_capabilities;
	struct ibmvnic_phys_parms query_phys_capabilities_rsp;
	struct ibmvnic_phys_parms set_phys_parms;
	struct ibmvnic_phys_parms set_phys_parms_rsp;
	struct ibmvnic_logical_link_state logical_link_state;
	struct ibmvnic_logical_link_state logical_link_state_rsp;
	struct ibmvnic_query_ip_offload query_ip_offload;
	struct ibmvnic_query_ip_offload query_ip_offload_rsp;
	struct ibmvnic_control_ip_offload control_ip_offload;
	struct ibmvnic_control_ip_offload control_ip_offload_rsp;
	struct ibmvnic_request_statistics request_statistics;
	struct ibmvnic_generic_crq request_statistics_rsp;
	struct ibmvnic_error_indication error_indication;
	struct ibmvnic_link_state_indication link_state_indication;
	struct ibmvnic_change_mac_addr change_mac_addr;
	struct ibmvnic_change_mac_addr change_mac_addr_rsp;
	struct ibmvnic_multicast_ctrl multicast_ctrl;
	struct ibmvnic_multicast_ctrl multicast_ctrl_rsp;
	struct ibmvnic_get_vpd_size get_vpd_size;
	struct ibmvnic_get_vpd_size_rsp get_vpd_size_rsp;
	struct ibmvnic_get_vpd get_vpd;
	struct ibmvnic_get_vpd_rsp get_vpd_rsp;
	struct ibmvnic_acl_change_indication acl_change_indication;
	struct ibmvnic_acl_query acl_query;
	struct ibmvnic_generic_crq acl_query_rsp;
	struct ibmvnic_tune tune;
	struct ibmvnic_generic_crq tune_rsp;
	struct ibmvnic_request_map request_map;
	struct ibmvnic_request_map_rsp request_map_rsp;
	struct ibmvnic_request_unmap request_unmap;
	struct ibmvnic_request_unmap_rsp request_unmap_rsp;
	struct ibmvnic_query_map query_map;
	struct ibmvnic_query_map_rsp query_map_rsp;
};
 658
/* Return codes carried in ibmvnic_rc.code. */
enum ibmvnic_rc_codes {
	SUCCESS = 0,
	PARTIALSUCCESS = 1,
	PERMISSION = 2,
	NOMEMORY = 3,
	PARAMETER = 4,
	UNKNOWNCOMMAND = 5,
	ABORTED = 6,
	INVALIDSTATE = 7,
	INVALIDIOBA = 8,
	INVALIDLENGTH = 9,
	UNSUPPORTEDOPTION = 10,
};
 672
/* Capability ids used in ibmvnic_capability.capability. MIN_*/MAX_* are
 * firmware-imposed bounds; REQ_* are the values the driver negotiates.
 */
enum ibmvnic_capabilities {
	MIN_TX_QUEUES = 1,
	MIN_RX_QUEUES = 2,
	MIN_RX_ADD_QUEUES = 3,
	MAX_TX_QUEUES = 4,
	MAX_RX_QUEUES = 5,
	MAX_RX_ADD_QUEUES = 6,
	REQ_TX_QUEUES = 7,
	REQ_RX_QUEUES = 8,
	REQ_RX_ADD_QUEUES = 9,
	MIN_TX_ENTRIES_PER_SUBCRQ = 10,
	MIN_RX_ADD_ENTRIES_PER_SUBCRQ = 11,
	MAX_TX_ENTRIES_PER_SUBCRQ = 12,
	MAX_RX_ADD_ENTRIES_PER_SUBCRQ = 13,
	REQ_TX_ENTRIES_PER_SUBCRQ = 14,
	REQ_RX_ADD_ENTRIES_PER_SUBCRQ = 15,
	TCP_IP_OFFLOAD = 16,
	PROMISC_REQUESTED = 17,
	PROMISC_SUPPORTED = 18,
	MIN_MTU = 19,
	MAX_MTU = 20,
	REQ_MTU = 21,
	MAX_MULTICAST_FILTERS = 22,
	VLAN_HEADER_INSERTION = 23,
	RX_VLAN_HEADER_INSERTION = 24,
	MAX_TX_SG_ENTRIES = 25,
	RX_SG_SUPPORTED = 26,
	RX_SG_REQUESTED = 27,
	OPT_TX_COMP_SUB_QUEUES = 28,
	OPT_RX_COMP_QUEUES = 29,
	OPT_RX_BUFADD_Q_PER_RX_COMP_Q = 30,
	OPT_TX_ENTRIES_PER_SUBCRQ = 31,
	OPT_RXBA_ENTRIES_PER_SUBCRQ = 32,
	TX_RX_DESC_REQ = 33,
};
 708
/* Values for ibmvnic_error_indication.error_cause. */
enum ibmvnic_error_cause {
	ADAPTER_PROBLEM = 0,
	BUS_PROBLEM = 1,
	FW_PROBLEM = 2,
	DD_PROBLEM = 3,	/* device driver problem */
	EEH_RECOVERY = 4,
	FW_UPDATED = 5,
	LOW_MEMORY = 6,
};
 718
/* CRQ command codes. A response code is its request code with the top
 * bit set (cmd | 0x80), as the pairings below show.
 */
enum ibmvnic_commands {
	VERSION_EXCHANGE = 0x01,
	VERSION_EXCHANGE_RSP = 0x81,
	QUERY_CAPABILITY = 0x02,
	QUERY_CAPABILITY_RSP = 0x82,
	REQUEST_CAPABILITY = 0x03,
	REQUEST_CAPABILITY_RSP = 0x83,
	LOGIN = 0x04,
	LOGIN_RSP = 0x84,
	QUERY_PHYS_PARMS = 0x05,
	QUERY_PHYS_PARMS_RSP = 0x85,
	QUERY_PHYS_CAPABILITIES = 0x06,
	QUERY_PHYS_CAPABILITIES_RSP = 0x86,
	SET_PHYS_PARMS = 0x07,
	SET_PHYS_PARMS_RSP = 0x87,
	ERROR_INDICATION = 0x08,
	LOGICAL_LINK_STATE = 0x0C,
	LOGICAL_LINK_STATE_RSP = 0x8C,
	REQUEST_STATISTICS = 0x0D,
	REQUEST_STATISTICS_RSP = 0x8D,
	COLLECT_FW_TRACE = 0x11,
	COLLECT_FW_TRACE_RSP = 0x91,
	LINK_STATE_INDICATION = 0x12,
	CHANGE_MAC_ADDR = 0x13,
	CHANGE_MAC_ADDR_RSP = 0x93,
	MULTICAST_CTRL = 0x14,
	MULTICAST_CTRL_RSP = 0x94,
	GET_VPD_SIZE = 0x15,
	GET_VPD_SIZE_RSP = 0x95,
	GET_VPD = 0x16,
	GET_VPD_RSP = 0x96,
	TUNE = 0x17,
	TUNE_RSP = 0x97,
	QUERY_IP_OFFLOAD = 0x18,
	QUERY_IP_OFFLOAD_RSP = 0x98,
	CONTROL_IP_OFFLOAD = 0x19,
	CONTROL_IP_OFFLOAD_RSP = 0x99,
	ACL_CHANGE_INDICATION = 0x1A,
	ACL_QUERY = 0x1B,
	ACL_QUERY_RSP = 0x9B,
	QUERY_MAP = 0x1D,
	QUERY_MAP_RSP = 0x9D,
	REQUEST_MAP = 0x1E,
	REQUEST_MAP_RSP = 0x9E,
	REQUEST_UNMAP = 0x1F,
	REQUEST_UNMAP_RSP = 0x9F,
	VLAN_CTRL = 0x20,
	VLAN_CTRL_RSP = 0xA0,
};
 768
/* Values for the 'first' (valid/type) byte of a CRQ message. CMD/CMD_RSP
 * and INIT_CMD/INIT_RSP intentionally share values; direction tells them
 * apart.
 */
enum ibmvnic_crq_type {
	IBMVNIC_CRQ_CMD			= 0x80,
	IBMVNIC_CRQ_CMD_RSP		= 0x80,
	IBMVNIC_CRQ_INIT_CMD		= 0xC0,
	IBMVNIC_CRQ_INIT_RSP		= 0xC0,
	IBMVNIC_CRQ_XPORT_EVENT		= 0xFF,
};
 776
/* Second-byte formats for INIT and transport-event CRQs.
 * NOTE(review): tag says "ibmvfc" — presumably copied from the ibmvfc
 * driver — but the constants are IBMVNIC_*; renaming the tag would be a
 * cross-file change, so it is left as-is here.
 */
enum ibmvfc_crq_format {
	IBMVNIC_CRQ_INIT                 = 0x01,
	IBMVNIC_CRQ_INIT_COMPLETE        = 0x02,
	IBMVNIC_PARTITION_MIGRATED       = 0x06,
	IBMVNIC_DEVICE_FAILOVER          = 0x08,
};
 783
/* Driver-side state for the main CRQ ring shared with the hypervisor. */
struct ibmvnic_crq_queue {
	union ibmvnic_crq *msgs;	/* ring of CRQ entries */
	int size, cur;			/* ring size and current index */
	dma_addr_t msg_token;		/* DMA address of the ring */
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	bool active;
	char name[32];			/* irq name */
};
 793
/* Overlay of every 32-byte sub-CRQ descriptor format; generic.first
 * selects the valid view.
 */
union sub_crq {
	struct ibmvnic_generic_scrq generic;
	struct ibmvnic_tx_comp_desc tx_comp;
	struct ibmvnic_tx_desc v1;
	struct ibmvnic_hdr_desc hdr;
	struct ibmvnic_hdr_ext_desc hdr_ext;
	struct ibmvnic_sge_desc sge;
	struct ibmvnic_rx_comp_desc rx_comp;
	struct ibmvnic_rx_buff_add_desc rx_add;
};
 804
/* DMA-mapped staging array for indirect (batched) sub-CRQ submission;
 * index is the number of descriptors currently queued.
 */
struct ibmvnic_ind_xmit_queue {
	union sub_crq *indir_arr;
	dma_addr_t indir_dma;
	int index;
};
 810
/* Per-queue state for one tx or rx sub-CRQ: the descriptor ring, its
 * hypervisor identifiers (crq_num/handle), irq bookkeeping, and the
 * indirect submission buffer.
 */
struct ibmvnic_sub_crq_queue {
	union sub_crq *msgs;
	int size, cur;
	dma_addr_t msg_token;
	unsigned long crq_num;		/* hypervisor queue number */
	unsigned long hw_irq;
	unsigned int irq;		/* Linux irq number */
	unsigned int pool_index;	/* owning rx/tx pool */
	int scrq_num;
	/* Used for serialization of msgs, cur */
	spinlock_t lock;
	struct sk_buff *rx_skb_top;	/* partially assembled rx frame */
	struct ibmvnic_adapter *adapter;
	struct ibmvnic_ind_xmit_queue ind_buf;
	atomic_t used;			/* in-flight descriptor count */
	char name[32];			/* irq name */
	u64 handle;
	cpumask_var_t affinity_mask;
} ____cacheline_aligned;
 830
/* One long term buffer (LTB): a DMA region registered with the firmware
 * via REQUEST_MAP and referenced by descriptors through map_id.
 */
struct ibmvnic_long_term_buff {
	unsigned char *buff;
	dma_addr_t addr;
	u64 size;
	u8 map_id;
};
 837
/* A pool's backing storage as an array of LTBs (see the sizing
 * discussion near IBMVNIC_ONE_LTB_SIZE at the top of the file).
 */
struct ibmvnic_ltb_set {
	int num_ltbs;
	struct ibmvnic_long_term_buff *ltbs;
};
 842
/* Per-in-flight-skb tx bookkeeping, looked up by completion correlator. */
struct ibmvnic_tx_buff {
	struct sk_buff *skb;
	int index;		/* slot within the tx pool */
	int pool_index;
	int num_entries;	/* sub-CRQ descriptors used by this send */
};
 849
/* One tx (or TSO) buffer pool: tx_buff bookkeeping entries, a free-slot
 * ring (free_map with consumer/producer indices), and the LTB set that
 * backs the actual frame data.
 */
struct ibmvnic_tx_pool {
	struct ibmvnic_tx_buff *tx_buff;
	int *free_map;
	int consumer_index;
	int producer_index;
	struct ibmvnic_ltb_set ltb_set;
	int num_buffers;
	int buf_size;
} ____cacheline_aligned;
 859
/* One posted rx buffer: the skb it will fill, its DMA address, and a
 * pointer into the pool's LTB data area.
 */
struct ibmvnic_rx_buff {
	struct sk_buff *skb;
	dma_addr_t dma;
	unsigned char *data;
	int size;
	int pool_index;
};
 867
/* One rx buffer pool: rx_buff entries, a free-slot map with separate
 * next_free/next_alloc cursors, an 'available' count of buffers posted
 * to firmware, and the backing LTB set.
 */
struct ibmvnic_rx_pool {
	struct ibmvnic_rx_buff *rx_buff;
	int size;			/* # of buffers in the pool */
	int index;
	int buff_size;
	atomic_t available;
	int *free_map;
	int next_free;
	int next_alloc;
	int active;
	struct ibmvnic_ltb_set ltb_set;
} ____cacheline_aligned;
 880
/* DMA buffer holding the adapter's Vital Product Data (GET_VPD). */
struct ibmvnic_vpd {
	unsigned char *buff;
	dma_addr_t dma_addr;
	u64 len;
};
 886
/* Adapter lifecycle states (adapter->state, guarded by state_lock). */
enum vnic_state {VNIC_PROBING = 1,
		 VNIC_PROBED,
		 VNIC_OPENING,
		 VNIC_OPEN,
		 VNIC_CLOSING,
		 VNIC_CLOSED,
		 VNIC_REMOVING,
		 VNIC_REMOVED,
		 VNIC_DOWN};
 896
/* Why a reset was queued (carried in struct ibmvnic_rwi). */
enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
			   VNIC_RESET_MOBILITY,
			   VNIC_RESET_FATAL,
			   VNIC_RESET_NON_FATAL,
			   VNIC_RESET_TIMEOUT,
			   VNIC_RESET_CHANGE_PARAM,
			   VNIC_RESET_PASSIVE_INIT};
 904
/* Reset work item queued on adapter->rwi_list (guarded by rwi_lock). */
struct ibmvnic_rwi {
	enum ibmvnic_reset_reason reset_reason;
	struct list_head list;
};
 909
/* User-tunable queue/ring/MTU settings (see adapter->desired/fallback). */
struct ibmvnic_tunables {
	u64 rx_queues;
	u64 tx_queues;
	u64 rx_entries;
	u64 tx_entries;
	u64 mtu;
};
 917
/* Per-device driver state: CRQ/sub-CRQ queues, rx/tx pools, negotiated
 * capabilities, reset machinery, and statistics. One instance per VNIC
 * vio device.
 */
struct ibmvnic_adapter {
	struct vio_dev *vdev;
	struct net_device *netdev;
	struct ibmvnic_crq_queue crq;
	u8 mac_addr[ETH_ALEN];
	struct ibmvnic_query_ip_offload_buffer ip_offload_buf;
	dma_addr_t ip_offload_tok;
	struct ibmvnic_control_ip_offload_buffer ip_offload_ctrl;
	dma_addr_t ip_offload_ctrl_tok;
	u32 msg_enable;

	/* Vital Product Data (VPD) */
	struct ibmvnic_vpd *vpd;
	char fw_version[32];

	/* Statistics */
	struct ibmvnic_statistics stats;
	dma_addr_t stats_token;
	struct completion stats_done;	/* signalled on REQUEST_STATISTICS_RSP */
	int replenish_no_mem;
	int replenish_add_buff_success;
	int replenish_add_buff_failure;
	int replenish_task_cycles;
	int tx_send_failed;
	int tx_map_failed;

	/* per-queue stats arrays, one entry per active queue */
	struct ibmvnic_tx_queue_stats *tx_stats_buffers;
	struct ibmvnic_rx_queue_stats *rx_stats_buffers;

	int phys_link_state;
	int logical_link_state;

	u32 speed;
	u8 duplex;

	/* login data */
	struct ibmvnic_login_buffer *login_buf;
	dma_addr_t login_buf_token;
	int login_buf_sz;

	struct ibmvnic_login_rsp_buffer *login_rsp_buf;
	dma_addr_t login_rsp_buf_token;
	int login_rsp_buf_sz;

	/* outstanding capability query/request CRQs */
	atomic_t running_cap_crqs;

	struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
	struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;

	/* rx structs */
	struct napi_struct *napi;
	struct ibmvnic_rx_pool *rx_pool;
	u64 promisc;

	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_pool *tso_pool;	/* separate fixed-size pools for TSO */
	struct completion probe_done;
	struct completion init_done;
	int init_done_rc;

	struct completion fw_done;
	/* Used for serialization of device commands */
	struct mutex fw_lock;
	int fw_done_rc;

	struct completion reset_done;
	int reset_done_rc;
	bool wait_for_reset;

	/* CPU hotplug instances for online & dead */
	struct hlist_node node;
	struct hlist_node node_dead;

	/* partner capabilities, values negotiated via
	 * QUERY/REQUEST_CAPABILITY (see enum ibmvnic_capabilities)
	 */
	u64 min_tx_queues;
	u64 min_rx_queues;
	u64 min_rx_add_queues;
	u64 max_tx_queues;
	u64 max_rx_queues;
	u64 max_rx_add_queues;
	u64 req_tx_queues;
	u64 req_rx_queues;
	u64 req_rx_add_queues;
	u64 min_tx_entries_per_subcrq;
	u64 min_rx_add_entries_per_subcrq;
	u64 max_tx_entries_per_subcrq;
	u64 max_rx_add_entries_per_subcrq;
	u64 req_tx_entries_per_subcrq;
	u64 req_rx_add_entries_per_subcrq;
	u64 tcp_ip_offload;
	u64 promisc_requested;
	u64 promisc_supported;
	u64 min_mtu;
	u64 max_mtu;
	u64 req_mtu;
	u64 prev_mtu;
	u64 max_multicast_filters;
	u64 vlan_header_insertion;
	u64 rx_vlan_header_insertion;
	u64 max_tx_sg_entries;
	u64 rx_sg_supported;
	u64 rx_sg_requested;
	u64 opt_tx_comp_sub_queues;
	u64 opt_rx_comp_queues;
	u64 opt_rx_bufadd_q_per_rx_comp_q;
	u64 opt_tx_entries_per_subcrq;
	u64 opt_rxba_entries_per_subcrq;
	__be64 tx_rx_desc_req;
#define MAX_MAP_ID	255
	DECLARE_BITMAP(map_ids, MAX_MAP_ID);	/* allocated LTB map ids */
	u32 num_active_rx_scrqs;
	u32 num_active_rx_pools;
	u32 num_active_rx_napi;
	u32 num_active_tx_scrqs;
	u32 num_active_tx_pools;

	/* previous geometry, kept to decide whether pools can be reused
	 * across a reset
	 */
	u32 prev_rx_pool_size;
	u32 prev_tx_pool_size;
	u32 cur_rx_buf_sz;
	u32 prev_rx_buf_sz;

	struct tasklet_struct tasklet;	/* CRQ servicing */
	enum vnic_state state;
	/* Used for serialization of state field. When taking both state
	 * and rwi locks, take state lock first.
	 */
	spinlock_t state_lock;
	enum ibmvnic_reset_reason reset_reason;
	struct list_head rwi_list;	/* pending reset work items */
	/* Used for serialization of rwi_list. When taking both state
	 * and rwi locks, take state lock first
	 */
	spinlock_t rwi_lock;
	struct work_struct ibmvnic_reset;
	struct delayed_work ibmvnic_delayed_reset;
	unsigned long resetting;
	/* last device reset time */
	unsigned long last_reset_time;

	bool napi_enabled;
	bool from_passive_init;
	bool login_pending;
	/* protected by rcu */
	bool tx_queues_active;
	bool failover_pending;
	bool force_reset_recovery;

	struct ibmvnic_tunables desired;
	struct ibmvnic_tunables fallback;
};