v6.13.7
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 *    Copyright IBM Corp. 2007
   4 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
   5 *		 Frank Pavlic <fpavlic@de.ibm.com>,
   6 *		 Thomas Spatzier <tspat@de.ibm.com>,
   7 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
   8 */
   9
  10#ifndef __QETH_CORE_H__
  11#define __QETH_CORE_H__
  12
  13#include <linux/completion.h>
  14#include <linux/debugfs.h>
  15#include <linux/if.h>
  16#include <linux/if_arp.h>
  17#include <linux/etherdevice.h>
  18#include <linux/if_vlan.h>
  19#include <linux/ctype.h>
  20#include <linux/in6.h>
  21#include <linux/bitops.h>
  22#include <linux/seq_file.h>
  23#include <linux/hashtable.h>
  24#include <linux/ip.h>
  25#include <linux/rcupdate.h>
  26#include <linux/refcount.h>
  27#include <linux/timer.h>
  28#include <linux/types.h>
  29#include <linux/wait.h>
  30#include <linux/workqueue.h>
  31
  32#include <net/dst.h>
  33#include <net/ip6_fib.h>
  34#include <net/ipv6.h>
  35#include <net/if_inet6.h>
  36#include <net/addrconf.h>
  37#include <net/route.h>
  38#include <net/sch_generic.h>
  39#include <net/tcp.h>
  40
  41#include <asm/debug.h>
  42#include <asm/qdio.h>
  43#include <asm/ccwdev.h>
  44#include <asm/ccwgroup.h>
  45#include <asm/sysinfo.h>
  46
  47#include <uapi/linux/if_link.h>
  48
  49#include "qeth_core_mpc.h"
  50
  51/**
  52 * Debug Facility stuff
  53 */
  54enum qeth_dbf_names {
  55	QETH_DBF_SETUP,
  56	QETH_DBF_MSG,
  57	QETH_DBF_CTRL,
  58	QETH_DBF_INFOS	/* must be last element */
  59};
  60
  61struct qeth_dbf_info {
  62	char name[DEBUG_MAX_NAME_LEN];
  63	int pages;
  64	int areas;
  65	int len;
  66	int level;
  67	struct debug_view *view;
  68	debug_info_t *id;
  69};
  70
  71#define QETH_DBF_CTRL_LEN 256U
  72
  73#define QETH_DBF_TEXT(name, level, text) \
  74	debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
  75
  76#define QETH_DBF_HEX(name, level, addr, len) \
  77	debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)
  78
  79#define QETH_DBF_MESSAGE(level, text...) \
  80	debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
  81
  82#define QETH_DBF_TEXT_(name, level, text...) \
  83	qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
  84
  85#define QETH_CARD_TEXT(card, level, text) \
  86	debug_text_event(card->debug, level, text)
  87
  88#define QETH_CARD_HEX(card, level, addr, len) \
  89	debug_event(card->debug, level, (void *)(addr), len)
  90
  91#define QETH_CARD_MESSAGE(card, text...) \
  92	debug_sprintf_event(card->debug, level, text)
  93
  94#define QETH_CARD_TEXT_(card, level, text...) \
  95	qeth_dbf_longtext(card->debug, level, text)
  96
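Illustrative only (hypothetical function and trace tags, not part of the upstream header): the debug facility wrappers above are typically called with a short constant tag, a formatted tag, or a hex dump, for example:

static void example_trace_usage(struct qeth_card *card, int rc)
{
	QETH_CARD_TEXT(card, 2, "setupcb");		/* fixed short tag */
	QETH_CARD_TEXT_(card, 2, "rc:%d", rc);		/* formatted tag */
	QETH_CARD_HEX(card, 4, &card->info, sizeof(card->info));
	QETH_DBF_MESSAGE(3, "example failed with rc %d\n", rc);
}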
  97#define SENSE_COMMAND_REJECT_BYTE 0
  98#define SENSE_COMMAND_REJECT_FLAG 0x80
  99#define SENSE_RESETTING_EVENT_BYTE 1
 100#define SENSE_RESETTING_EVENT_FLAG 0x80
 101
 102static inline u32 qeth_get_device_id(struct ccw_device *cdev)
 103{
 104	struct ccw_dev_id dev_id;
 105	u32 id;
 106
 107	ccw_device_get_id(cdev, &dev_id);
 108	id = dev_id.devno;
 109	id |= (u32) (dev_id.ssid << 16);
 110
 111	return id;
 112}
 113
 114/*
 115 * Common IO related definitions
 116 */
 117#define CARD_RDEV(card) card->read.ccwdev
 118#define CARD_WDEV(card) card->write.ccwdev
 119#define CARD_DDEV(card) card->data.ccwdev
 120#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
 121#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
 122#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
 123#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
 124#define CCW_DEVID(cdev)		(qeth_get_device_id(cdev))
 125#define CARD_DEVID(card)	(CCW_DEVID(CARD_RDEV(card)))
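Illustrative only (hypothetical helper and message text): qeth_get_device_id() packs the subchannel set ID into bits 16 and up and the device number into the low 16 bits, so a read device 0.1.f500 is reported as 0x1f500 by a call such as:

static void example_log_devid(struct qeth_card *card)
{
	QETH_DBF_MESSAGE(2, "Recovery on device %x started\n",
			 CARD_DEVID(card));
}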
 126
 127/* Routing stuff */
 128struct qeth_routing_info {
 129	enum qeth_routing_types type;
 130};
 131
 132/* SETBRIDGEPORT stuff */
 133enum qeth_sbp_roles {
 134	QETH_SBP_ROLE_NONE	= 0,
 135	QETH_SBP_ROLE_PRIMARY	= 1,
 136	QETH_SBP_ROLE_SECONDARY	= 2,
 137};
 138
 139enum qeth_sbp_states {
 140	QETH_SBP_STATE_INACTIVE	= 0,
 141	QETH_SBP_STATE_STANDBY	= 1,
 142	QETH_SBP_STATE_ACTIVE	= 2,
 143};
 144
 145#define QETH_SBP_HOST_NOTIFICATION 1
 146
 147struct qeth_sbp_info {
 148	__u32 supported_funcs;
 149	enum qeth_sbp_roles role;
 150	__u32 hostnotification:1;
 151	__u32 reflect_promisc:1;
 152	__u32 reflect_promisc_primary:1;
 153};
 154
 155struct qeth_vnicc_info {
 156	/* supported/currently configured VNICCs; updated in IPA exchanges */
 157	u32 sup_chars;
 158	u32 cur_chars;
 159	/* supported commands: bitmasks which VNICCs support respective cmd */
 160	u32 set_char_sup;
 161	u32 getset_timeout_sup;
 162	/* timeout value for the learning characteristic */
 163	u32 learning_timeout;
 164	/* characteristics wanted/configured by user */
 165	u32 wanted_chars;
 166	/* has user explicitly enabled rx_bcast while online? */
 167	bool rx_bcast_enabled;
 168};
 169
 170#define QETH_IDX_FUNC_LEVEL_OSD		 0x0101
 171#define QETH_IDX_FUNC_LEVEL_IQD		 0x4108
 172
 173#define QETH_BUFSIZE		4096
 174#define CCW_CMD_WRITE		0x01
 175#define CCW_CMD_READ		0x02
 176
 177/**
 178 * some more defs
 179 */
 180#define QETH_TX_TIMEOUT		(100 * HZ)
 181#define QETH_RCD_TIMEOUT	(60 * HZ)
 182#define QETH_RECLAIM_WORK_TIME	HZ
 183#define QETH_MAX_PORTNO		15
 184
 185/*****************************************************************************/
 186/* QDIO queue and buffer handling                                            */
 187/*****************************************************************************/
 188#define QETH_MAX_OUT_QUEUES	4
 189#define QETH_IQD_MIN_TXQ	2	/* One for ucast, one for mcast. */
 190#define QETH_IQD_MCAST_TXQ	0
 191#define QETH_IQD_MIN_UCAST_TXQ	1
 192
 193#define QETH_MAX_IN_QUEUES	2
 194#define QETH_RX_COPYBREAK      (PAGE_SIZE >> 1)
 195#define QETH_IN_BUF_SIZE_DEFAULT 65536
 196#define QETH_IN_BUF_COUNT_DEFAULT 64
 197#define QETH_IN_BUF_COUNT_HSDEFAULT 128
 198#define QETH_IN_BUF_COUNT_MIN	8U
 199#define QETH_IN_BUF_COUNT_MAX	128U
 200#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
 201#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
 202		 ((card)->qdio.in_buf_pool.buf_count / 2)
 203
 204/* buffers we have to be behind before we get a PCI */
 205#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
 206/*enqueued free buffers left before we get a PCI*/
 207#define QETH_PCI_THRESHOLD_B(card) 0
 208/*not used unless the microcode gets patched*/
 209#define QETH_PCI_TIMER_VALUE(card) 3
 210
  211/* priority queueing */
 212#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
 213#define QETH_DEFAULT_QUEUE    2
 214#define QETH_NO_PRIO_QUEUEING 0
 215#define QETH_PRIO_Q_ING_PREC  1
 216#define QETH_PRIO_Q_ING_TOS   2
 217#define QETH_PRIO_Q_ING_SKB   3
 218#define QETH_PRIO_Q_ING_VLAN  4
 219#define QETH_PRIO_Q_ING_FIXED 5
 220
 221/* Packing */
 222#define QETH_LOW_WATERMARK_PACK  2
 223#define QETH_HIGH_WATERMARK_PACK 5
 224#define QETH_WATERMARK_PACK_FUZZ 1
 225
 226struct qeth_hdr_layer3 {
 227	__u8  id;
 228	__u8  flags;
 229	__u16 inbound_checksum; /*TSO:__u16 seqno */
 230	__u32 token;		/*TSO: __u32 reserved */
 231	__u16 length;
 232	__u8  vlan_prio;
 233	__u8  ext_flags;
 234	__u16 vlan_id;
 235	__u16 frame_offset;
 236	union {
 237		/* TX: */
 238		struct in6_addr addr;
 239		/* RX: */
 240		struct rx {
 241			u8 res1[2];
 242			u8 src_mac[6];
 243			u8 res2[4];
 244			u16 vlan_id;
 245			u8 res3[2];
 246		} rx;
 247	} next_hop;
 248};
 249
 250struct qeth_hdr_layer2 {
 251	__u8 id;
 252	__u8 flags[3];
 253	__u8 port_no;
 254	__u8 hdr_length;
 255	__u16 pkt_length;
 256	__u16 seq_no;
 257	__u16 vlan_id;
 258	__u32 reserved;
 259	__u8 reserved2[16];
 260} __attribute__ ((packed));
 261
 262struct qeth_hdr {
 263	union {
 264		struct qeth_hdr_layer2 l2;
 265		struct qeth_hdr_layer3 l3;
 266	} hdr;
 267} __attribute__ ((packed));
 268
 269#define QETH_QIB_PQUE_ORDER_RR		0
 270#define QETH_QIB_PQUE_UNITS_SBAL	2
 271#define QETH_QIB_PQUE_PRIO_DEFAULT	4
 272
 273struct qeth_qib_parms {
 274	char pcit_magic[4];
 275	u32 pcit_a;
 276	u32 pcit_b;
 277	u32 pcit_c;
 278	char blkt_magic[4];
 279	u32 blkt_total;
 280	u32 blkt_inter_packet;
 281	u32 blkt_inter_packet_jumbo;
 282	char pque_magic[4];
 283	u8 pque_order;
 284	u8 pque_units;
 285	u16 reserved;
 286	u32 pque_priority[4];
 287};
 288
 289/*TCP Segmentation Offload header*/
 290struct qeth_hdr_ext_tso {
 291	__u16 hdr_tot_len;
 292	__u8  imb_hdr_no;
 293	__u8  reserved;
 294	__u8  hdr_type;
 295	__u8  hdr_version;
 296	__u16 hdr_len;
 297	__u32 payload_len;
 298	__u16 mss;
 299	__u16 dg_hdr_len;
 300	__u8  padding[16];
 301} __attribute__ ((packed));
 302
 303struct qeth_hdr_tso {
 304	struct qeth_hdr hdr;	/*hdr->hdr.l3.xxx*/
 305	struct qeth_hdr_ext_tso ext;
 306} __attribute__ ((packed));
 307
 308
 309/* flags for qeth_hdr.flags */
 310#define QETH_HDR_PASSTHRU 0x10
 311#define QETH_HDR_IPV6     0x80
 312#define QETH_HDR_CAST_MASK 0x07
 313enum qeth_cast_flags {
 314	QETH_CAST_UNICAST   = 0x06,
 315	QETH_CAST_MULTICAST = 0x04,
 316	QETH_CAST_BROADCAST = 0x05,
 317	QETH_CAST_ANYCAST   = 0x07,
 318	QETH_CAST_NOCAST    = 0x00,
 319};
 320
 321enum qeth_layer2_frame_flags {
 322	QETH_LAYER2_FLAG_MULTICAST = 0x01,
 323	QETH_LAYER2_FLAG_BROADCAST = 0x02,
 324	QETH_LAYER2_FLAG_UNICAST   = 0x04,
 325	QETH_LAYER2_FLAG_VLAN      = 0x10,
 326};
 327
 328enum qeth_header_ids {
 329	QETH_HEADER_TYPE_LAYER3 = 0x01,
 330	QETH_HEADER_TYPE_LAYER2 = 0x02,
 331	QETH_HEADER_TYPE_L3_TSO	= 0x03,
 332	QETH_HEADER_TYPE_L2_TSO	= 0x06,
 333	QETH_HEADER_MASK_INVAL	= 0x80,
 334};
 335/* flags for qeth_hdr.ext_flags */
 336#define QETH_HDR_EXT_VLAN_FRAME       0x01
 337#define QETH_HDR_EXT_TOKEN_ID         0x02
 338#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
 339#define QETH_HDR_EXT_SRC_MAC_ADDR     0x08
 340#define QETH_HDR_EXT_CSUM_HDR_REQ     0x10
 341#define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
 342#define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
 343
 344static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
 345				     struct qeth_hdr_layer2 *h2)
 346{
 347	return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
 348	       h1->vlan_id == h2->vlan_id;
 349}
 350
 351static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
 352					 struct qeth_hdr_layer3 *h2)
 353{
 354	return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
 355	       h1->vlan_id == h2->vlan_id;
 356}
 357
 358static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
 359					 struct qeth_hdr_layer3 *h2)
 360{
 361	return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
 362	       ipv6_addr_equal(&h1->next_hop.addr, &h2->next_hop.addr);
 363}
 364
 365struct qeth_local_addr {
 366	struct hlist_node hnode;
 367	struct rcu_head rcu;
 368	struct in6_addr addr;
 369};
 370
 371enum qeth_qdio_info_states {
 372	QETH_QDIO_UNINITIALIZED,
 373	QETH_QDIO_ALLOCATED,
 374	QETH_QDIO_ESTABLISHED,
 375	QETH_QDIO_CLEANING
 376};
 377
 378struct qeth_buffer_pool_entry {
 379	struct list_head list;
 380	struct list_head init_list;
 381	struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
 382};
 383
 384struct qeth_qdio_buffer_pool {
 385	struct list_head entry_list;
 386	int buf_count;
 387};
 388
 389struct qeth_qdio_buffer {
 390	struct qdio_buffer *buffer;
 391	/* the buffer pool entry currently associated to this buffer */
 392	struct qeth_buffer_pool_entry *pool_entry;
 393	struct sk_buff *rx_skb;
 394};
 395
 396struct qeth_qdio_q {
 397	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
 398	struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
 399	int next_buf_to_init;
 400};
 401
 402enum qeth_qdio_out_buffer_state {
 403	/* Owned by driver, in order to be filled. */
 404	QETH_QDIO_BUF_EMPTY,
 405	/* Filled by driver; owned by hardware in order to be sent. */
 406	QETH_QDIO_BUF_PRIMED,
 407};
 408
 409enum qeth_qaob_state {
 410	QETH_QAOB_ISSUED,
 411	QETH_QAOB_PENDING,
 412	QETH_QAOB_DONE,
 413};
 414
 415struct qeth_qaob_priv1 {
 416	unsigned int state;
 417	u8 queue_no;
 418};
 419
 420struct qeth_qdio_out_buffer {
 421	struct qdio_buffer *buffer;
 422	atomic_t state;
 423	int next_element_to_fill;
 424	unsigned int frames;
 425	unsigned int bytes;
 426	struct sk_buff_head skb_list;
 427	DECLARE_BITMAP(from_kmem_cache, QDIO_MAX_ELEMENTS_PER_BUFFER);
 428
 429	struct list_head list_entry;
 430	struct qaob *aob;
 431};
 432
 433struct qeth_card;
 434
 435#define QETH_CARD_STAT_ADD(_c, _stat, _val)	((_c)->stats._stat += (_val))
 436#define QETH_CARD_STAT_INC(_c, _stat)		QETH_CARD_STAT_ADD(_c, _stat, 1)
 437
 438#define QETH_TXQ_STAT_ADD(_q, _stat, _val)	((_q)->stats._stat += (_val))
 439#define QETH_TXQ_STAT_INC(_q, _stat)		QETH_TXQ_STAT_ADD(_q, _stat, 1)
 440
 441struct qeth_card_stats {
 442	u64 rx_bufs;
 443	u64 rx_skb_csum;
 444	u64 rx_sg_skbs;
 445	u64 rx_sg_frags;
 446	u64 rx_sg_alloc_page;
 447
 448	u64 rx_dropped_nomem;
 449	u64 rx_dropped_notsupp;
 450	u64 rx_dropped_runt;
 451
 452	/* rtnl_link_stats64 */
 453	u64 rx_packets;
 454	u64 rx_bytes;
 455	u64 rx_multicast;
 456	u64 rx_length_errors;
 457	u64 rx_frame_errors;
 458	u64 rx_fifo_errors;
 459};
 460
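A minimal sketch (hypothetical caller) of the statistics helpers above, using fields of struct qeth_card_stats:

static void example_account_rx_skb(struct qeth_card *card, unsigned int len)
{
	QETH_CARD_STAT_INC(card, rx_packets);
	QETH_CARD_STAT_ADD(card, rx_bytes, len);
}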
 461struct qeth_out_q_stats {
 462	u64 bufs;
 463	u64 bufs_pack;
 464	u64 buf_elements;
 465	u64 skbs_pack;
 466	u64 skbs_sg;
 467	u64 skbs_csum;
 468	u64 skbs_tso;
 469	u64 skbs_linearized;
 470	u64 skbs_linearized_fail;
 471	u64 tso_bytes;
 472	u64 packing_mode_switch;
 473	u64 stopped;
 474	u64 doorbell;
 475	u64 coal_frames;
 476	u64 completion_irq;
 477	u64 completion_yield;
 478	u64 completion_timer;
 479
 480	/* rtnl_link_stats64 */
 481	u64 tx_packets;
 482	u64 tx_bytes;
 483	u64 tx_errors;
 484	u64 tx_dropped;
 485};
 486
 487#define QETH_TX_MAX_COALESCED_FRAMES	1
 488#define QETH_TX_COALESCE_USECS		25
 489#define QETH_TX_TIMER_USECS		500
 490
 491struct qeth_qdio_out_q {
 492	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
 493	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
 494	struct list_head pending_bufs;
 495	struct qeth_out_q_stats stats;
 496	spinlock_t lock;
 497	unsigned int priority;
 498	u8 next_buf_to_fill;
 499	u8 max_elements;
 500	u8 queue_no;
 501	u8 do_pack;
 502	struct qeth_card *card;
 503	/*
 504	 * number of buffers that are currently filled (PRIMED)
 505	 * -> these buffers are hardware-owned
 506	 */
 507	atomic_t used_buffers;
 508	/* indicates whether PCI flag must be set (or if one is outstanding) */
 509	atomic_t set_pci_flags_count;
 510	struct napi_struct napi;
 511	struct timer_list timer;
 512	struct qeth_hdr *prev_hdr;
 513	unsigned int coalesced_frames;
 514	u8 bulk_start;
 515	u8 bulk_count;
 516	u8 bulk_max;
 517
 518	unsigned int coalesce_usecs;
 519	unsigned int max_coalesced_frames;
 520	unsigned int rescan_usecs;
 521};
 522
 523#define qeth_for_each_output_queue(card, q, i)		\
 524	for (i = 0; i < card->qdio.no_out_queues &&	\
 525		    (q = card->qdio.out_qs[i]); i++)
 526
 527#define	qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
 528
 529static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue,
 530				     unsigned long usecs)
 531{
 532	timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies);
 533}
 534
 535static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
 536{
 537	return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
 538}
 539
 540static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
 541{
 542	return atomic_read(&queue->used_buffers) == 0;
 543}
 544
 545struct qeth_qdio_info {
 546	atomic_t state;
 547	/* input */
 548	struct qeth_qdio_q *in_q;
 549	struct qeth_qdio_q *c_q;
 550	struct qeth_qdio_buffer_pool in_buf_pool;
 551	struct qeth_qdio_buffer_pool init_pool;
 552	int in_buf_size;
 553
 554	/* output */
 555	unsigned int no_out_queues;
 556	struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
 557
 558	/* priority queueing */
 559	int do_prio_queueing;
 560	int default_out_queue;
 561};
 562
 563/**
 564 *  channel state machine
 565 */
 566enum qeth_channel_states {
 567	CH_STATE_UP,
 568	CH_STATE_DOWN,
 569	CH_STATE_HALTED,
 570	CH_STATE_STOPPED,
 571};
 572/**
 573 * card state machine
 574 */
 575enum qeth_card_states {
 576	CARD_STATE_DOWN,
 577	CARD_STATE_SOFTSETUP,
 578};
 579
 580/**
 581 * Protocol versions
 582 */
 583enum qeth_prot_versions {
 584	QETH_PROT_NONE = 0x0000,
 585	QETH_PROT_IPV4 = 0x0004,
 586	QETH_PROT_IPV6 = 0x0006,
 587};
 588
 589enum qeth_cq {
 590	QETH_CQ_DISABLED = 0,
 591	QETH_CQ_ENABLED = 1,
 592	QETH_CQ_NOTAVAILABLE = 2,
 593};
 594
 595struct qeth_ipato {
 596	bool enabled;
 597	bool invert4;
 598	bool invert6;
 599	struct list_head entries;
 600};
 601
 602struct qeth_channel {
 603	struct ccw_device *ccwdev;
 604	struct qeth_cmd_buffer *active_cmd;
 605	enum qeth_channel_states state;
 606};
 607
 608struct qeth_reply {
 609	int (*callback)(struct qeth_card *card, struct qeth_reply *reply,
 610			unsigned long data);
 611	void *param;
 612};
 613
 614struct qeth_cmd_buffer {
 615	struct list_head list_entry;
 616	struct completion done;
 617	spinlock_t lock;
 618	unsigned int length;
 619	refcount_t ref_count;
 620	struct qeth_channel *channel;
 621	struct qeth_reply reply;
 622	long timeout;
 623	unsigned char *data;
 624	void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob);
 625	bool (*match)(struct qeth_cmd_buffer *iob,
 626		      struct qeth_cmd_buffer *reply);
 627	void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 628			 unsigned int data_length);
 629	int rc;
 630};
 631
 632static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob)
 633{
 634	refcount_inc(&iob->ref_count);
 635}
 636
 637static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob)
 638{
 639	if (!IS_IPA(iob->data))
 640		return NULL;
 641
 642	return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
 643}
 644
 645static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
 646{
 647	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
 648}
 649
 650static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob)
 651{
 652	return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8));
 653}
 654
 655/**
 656 *  OSA card related definitions
 657 */
 658struct qeth_token {
 659	__u32 issuer_rm_w;
 660	__u32 issuer_rm_r;
 661	__u32 cm_filter_w;
 662	__u32 cm_filter_r;
 663	__u32 cm_connection_w;
 664	__u32 cm_connection_r;
 665	__u32 ulp_filter_w;
 666	__u32 ulp_filter_r;
 667	__u32 ulp_connection_w;
 668	__u32 ulp_connection_r;
 669};
 670
 671struct qeth_seqno {
 672	__u32 trans_hdr;
 673	__u32 pdu_hdr;
 674	__u32 pdu_hdr_ack;
 675	__u16 ipa;
 676};
 677
 678struct qeth_card_blkt {
 679	int time_total;
 680	int inter_packet;
 681	int inter_packet_jumbo;
 682};
 683
 684enum qeth_pnso_mode {
 685	QETH_PNSO_NONE,
 686	QETH_PNSO_BRIDGEPORT,
 687	QETH_PNSO_ADDR_INFO,
 688};
 689
 690enum qeth_link_mode {
 691	QETH_LINK_MODE_UNKNOWN,
 692	QETH_LINK_MODE_FIBRE_SHORT,
 693	QETH_LINK_MODE_FIBRE_LONG,
 694};
 695
 696struct qeth_link_info {
 697	u32 speed;
 698	u8 duplex;
 699	u8 port;
 700	enum qeth_link_mode link_mode;
 701};
 702
 703#define QETH_BROADCAST_WITH_ECHO    0x01
 704#define QETH_BROADCAST_WITHOUT_ECHO 0x02
 705struct qeth_card_info {
 706	unsigned short unit_addr2;
 707	unsigned short cula;
 708	__u16 func_level;
 709	char mcl_level[QETH_MCL_LENGTH + 1];
 710	/* doubleword below corresponds to net_if_token */
 711	u16 ddev_devno;
 712	u8 cssid;
 713	u8 iid;
 714	u8 ssid;
 715	u8 chpid;
 716	u16 chid;
 717	u8 ids_valid:1; /* cssid,iid,chid */
 718	u8 dev_addr_is_registered:1;
 719	u8 promisc_mode:1;
 720	u8 use_v1_blkt:1;
 721	u8 is_vm_nic:1;
 722	/* no bitfield, we take a pointer on these two: */
 723	u8 has_lp2lp_cso_v6;
 724	u8 has_lp2lp_cso_v4;
 725	enum qeth_pnso_mode pnso_mode;
 726	enum qeth_card_types type;
 727	enum qeth_link_types link_type;
 728	int broadcast_capable;
 729	bool layer_enforced;
 730	struct qeth_card_blkt blkt;
 731	__u32 diagass_support;
 732	__u32 hwtrap;
 733	struct qeth_link_info link_info;
 734};
 735
 736enum qeth_discipline_id {
 737	QETH_DISCIPLINE_UNDETERMINED = -1,
 738	QETH_DISCIPLINE_LAYER3 = 0,
 739	QETH_DISCIPLINE_LAYER2 = 1,
 740};
 741
 742struct qeth_card_options {
 743	struct qeth_ipa_caps ipa4;
 744	struct qeth_ipa_caps ipa6;
 745	struct qeth_routing_info route4;
 746	struct qeth_routing_info route6;
 747	struct qeth_ipa_caps adp; /* Adapter parameters */
 748	struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
 749	struct qeth_vnicc_info vnicc; /* VNICC options */
 750	enum qeth_discipline_id layer;
 751	enum qeth_ipa_isolation_modes isolation;
 752	int sniffer;
 753	enum qeth_cq cq;
 754	char hsuid[9];
 755};
 756
 757#define	IS_LAYER2(card)	((card)->options.layer == QETH_DISCIPLINE_LAYER2)
 758#define	IS_LAYER3(card)	((card)->options.layer == QETH_DISCIPLINE_LAYER3)
 759
 760/*
 761 * thread bits for qeth_card thread masks
 762 */
 763enum qeth_threads {
 764	QETH_RECOVER_THREAD = 1,
 765};
 766
 767struct qeth_discipline {
 768	int (*setup) (struct ccwgroup_device *);
 769	void (*remove) (struct ccwgroup_device *);
 770	int (*set_online)(struct qeth_card *card, bool carrier_ok);
 771	void (*set_offline)(struct qeth_card *card);
 772	int (*control_event_handler)(struct qeth_card *card,
 773					struct qeth_ipa_cmd *cmd);
 774};
 775
 776enum qeth_addr_disposition {
 777	QETH_DISP_ADDR_DELETE = 0,
 778	QETH_DISP_ADDR_DO_NOTHING = 1,
 779	QETH_DISP_ADDR_ADD = 2,
 780};
 781
 782struct qeth_rx {
 783	int b_count;
 784	int b_index;
 785	u8 buf_element;
 786	int e_offset;
 787	int qdio_err;
 788	u8 bufs_refill;
 789};
 790
 791struct qeth_switch_info {
 792	__u32 capabilities;
 793	__u32 settings;
 794};
 795
 796struct qeth_priv {
 797	unsigned int rx_copybreak;
 798	unsigned int tx_wanted_queues;
 799	u32 brport_hw_features;
 800	u32 brport_features;
 801};
 802
 803struct qeth_card {
 804	enum qeth_card_states state;
 805	spinlock_t lock;
 806	struct ccwgroup_device *gdev;
 807	struct qeth_cmd_buffer *read_cmd;
 808	struct qeth_channel read;
 809	struct qeth_channel write;
 810	struct qeth_channel data;
 811
 812	struct net_device *dev;
 813	struct dentry *debugfs;
 814	struct qeth_card_stats stats;
 815	struct qeth_card_info info;
 816	struct qeth_token token;
 817	struct qeth_seqno seqno;
 818	struct qeth_card_options options;
 819
 820	struct workqueue_struct *event_wq;
 821	struct workqueue_struct *cmd_wq;
 822	wait_queue_head_t wait_q;
 823
 824	struct mutex ip_lock;
 825	/* protected by ip_lock: */
 826	DECLARE_HASHTABLE(ip_htable, 4);
 827	struct qeth_ipato ipato;
 828
 829	DECLARE_HASHTABLE(local_addrs4, 4);
 830	DECLARE_HASHTABLE(local_addrs6, 4);
 831	spinlock_t local_addrs4_lock;
 832	spinlock_t local_addrs6_lock;
 833	DECLARE_HASHTABLE(rx_mode_addrs, 4);
 834	struct work_struct rx_mode_work;
 835	struct work_struct kernel_thread_starter;
 836	spinlock_t thread_mask_lock;
 837	unsigned long thread_start_mask;
 838	unsigned long thread_allowed_mask;
 839	unsigned long thread_running_mask;
 840	struct list_head cmd_waiter_list;
 841	/* QDIO buffer handling */
 842	struct qeth_qdio_info qdio;
 843	int read_or_write_problem;
 844	const struct qeth_discipline *discipline;
 845	atomic_t force_alloc_skb;
 846	struct service_level qeth_service_level;
 847	struct qdio_ssqd_desc ssqd;
 848	debug_info_t *debug;
 849	struct mutex sbp_lock;
 850	struct mutex conf_mutex;
 851	struct mutex discipline_mutex;
 852	struct napi_struct napi;
 853	struct qeth_rx rx;
 854	struct delayed_work buffer_reclaim_work;
 855};
 856
 857static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
 858{
 859	return card->state == CARD_STATE_SOFTSETUP;
 860}
 861
 862static inline bool qeth_use_tx_irqs(struct qeth_card *card)
 863{
 864	return !IS_IQD(card);
 865}
 866
 867static inline void qeth_unlock_channel(struct qeth_card *card,
 868				       struct qeth_channel *channel)
 869{
 870	xchg(&channel->active_cmd, NULL);
 871	wake_up(&card->wait_q);
 872}
 873
 874static inline bool qeth_trylock_channel(struct qeth_channel *channel,
 875					struct qeth_cmd_buffer *cmd)
 876{
 877	return cmpxchg(&channel->active_cmd, NULL, cmd) == NULL;
 878}
 879
 880struct qeth_trap_id {
 881	__u16 lparnr;
 882	char vmname[8];
 883	__u8 chpid;
 884	__u8 ssid;
 885	__u16 devno;
 886} __packed;
 887
 888static inline bool qeth_uses_tx_prio_queueing(struct qeth_card *card)
 889{
 890	return card->qdio.do_prio_queueing != QETH_NO_PRIO_QUEUEING;
 891}
 892
 893static inline unsigned int qeth_tx_actual_queues(struct qeth_card *card)
 894{
 895	struct qeth_priv *priv = netdev_priv(card->dev);
 896
 897	if (qeth_uses_tx_prio_queueing(card))
 898		return min(card->dev->num_tx_queues, card->qdio.no_out_queues);
 899
 900	return min(priv->tx_wanted_queues, card->qdio.no_out_queues);
 901}
 902
 903static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
 904{
 905	if (txq == QETH_IQD_MCAST_TXQ)
 906		return dev->num_tx_queues - 1;
 907	if (txq == dev->num_tx_queues - 1)
 908		return QETH_IQD_MCAST_TXQ;
 909	return txq;
 910}
 911
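Worked example, assuming a hypothetical IQD device whose stack exposes four TX queues: qeth_iqd_translate_txq(dev, 0) returns 3, qeth_iqd_translate_txq(dev, 3) returns QETH_IQD_MCAST_TXQ, and queues 1 and 2 translate to themselves, so the multicast queue and the last stack queue simply swap places in both directions.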
 912static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
 913					   struct qeth_qdio_out_q *queue)
 914{
 915	return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
 916	       QETH_IQD_MCAST_TXQ;
 917}
 918
 919static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
 920					  unsigned int elements)
 921{
 922	unsigned int i;
 923
 924	for (i = 0; i < elements; i++)
 925		memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
 926	buf->element[14].sflags = 0;
 927	buf->element[15].sflags = 0;
 928}
 929
 930/**
 931 * qeth_get_elements_for_range() -	find number of SBALEs to cover range.
 932 * @start:				Start of the address range.
 933 * @end:				Address after the end of the range.
 934 *
 935 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 936 * the specified address range.
 937 */
 938static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
 939{
 940	return PFN_UP(end) - PFN_DOWN(start);
 941}
 942
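Rough usage sketch (hypothetical caller): counting the SBALEs needed for an skb's linear data. For instance, a 5000-byte buffer that starts 3900 bytes into a page touches three pages, so the helper returns 3.

static inline int example_linear_sbales(struct sk_buff *skb)
{
	addr_t start = (addr_t)skb->data;

	return qeth_get_elements_for_range(start, start + skb_headlen(skb));
}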
 943static inline int qeth_get_ether_cast_type(struct sk_buff *skb)
 944{
 945	u8 *addr = eth_hdr(skb)->h_dest;
 946
 947	if (is_multicast_ether_addr(addr))
 948		return is_broadcast_ether_addr(addr) ? RTN_BROADCAST :
 949						       RTN_MULTICAST;
 950	return RTN_UNICAST;
 951}
 952
 953static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
 954						   __be16 proto)
 955{
 956	struct dst_entry *dst = skb_dst(skb);
 957	struct rt6_info *rt;
 958
 959	rt = dst_rt6_info(dst);
 960	if (dst) {
 961		if (proto == htons(ETH_P_IPV6))
 962			dst = dst_check(dst, rt6_get_cookie(rt));
 963		else
 964			dst = dst_check(dst, 0);
 965	}
 966
 967	return dst;
 968}
 969
 970static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
 971					  struct dst_entry *dst)
 972{
 973	return (dst) ? rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) :
 974		       ip_hdr(skb)->daddr;
 975}
 976
 977static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
 978						    struct dst_entry *dst)
 979{
 980	struct rt6_info *rt = dst_rt6_info(dst);
 981
 982	if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
 983		return &rt->rt6i_gateway;
 984	else
 985		return &ipv6_hdr(skb)->daddr;
 986}
 987
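A hedged sketch (hypothetical caller, IPv4 only, error handling omitted) of how the RCU dst and next-hop helpers above combine on the transmit path:

static __be32 example_ipv4_next_hop(struct sk_buff *skb)
{
	struct dst_entry *dst;
	__be32 next_hop;

	rcu_read_lock();
	dst = qeth_dst_check_rcu(skb, htons(ETH_P_IP));
	next_hop = qeth_next_hop_v4_rcu(skb, dst);
	rcu_read_unlock();

	return next_hop;
}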
 988static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, __be16 proto)
 989{
 990	*flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ;
 991	if ((proto == htons(ETH_P_IP) && ip_hdr(skb)->protocol == IPPROTO_UDP) ||
 992	    (proto == htons(ETH_P_IPV6) && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP))
 993		*flags |= QETH_HDR_EXT_UDP;
 994}
 995
 996static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
 997		struct qeth_buffer_pool_entry *entry)
 998{
 999	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
1000}
1001
1002static inline int qeth_is_diagass_supported(struct qeth_card *card,
1003		enum qeth_diags_cmds cmd)
1004{
1005	return card->info.diagass_support & (__u32)cmd;
1006}
1007
1008int qeth_send_simple_setassparms_prot(struct qeth_card *card,
1009				      enum qeth_ipa_funcs ipa_func,
1010				      u16 cmd_code, u32 *data,
1011				      enum qeth_prot_versions prot);
1012/* IPv4 variant */
1013static inline int qeth_send_simple_setassparms(struct qeth_card *card,
1014					       enum qeth_ipa_funcs ipa_func,
1015					       u16 cmd_code, u32 *data)
1016{
1017	return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
1018						 data, QETH_PROT_IPV4);
1019}
1020
1021static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
1022						  enum qeth_ipa_funcs ipa_func,
1023						  u16 cmd_code, u32 *data)
1024{
1025	return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code,
1026						 data, QETH_PROT_IPV6);
1027}
1028
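Hypothetical sketch of the wrappers above; IPA_OUTBOUND_CHECKSUM and IPA_CMD_ASS_START are assumed to be provided by qeth_core_mpc.h:

static int example_start_ipv4_tx_csum(struct qeth_card *card)
{
	return qeth_send_simple_setassparms(card, IPA_OUTBOUND_CHECKSUM,
					    IPA_CMD_ASS_START, NULL);
}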
1029extern const struct qeth_discipline qeth_l2_discipline;
1030extern const struct qeth_discipline qeth_l3_discipline;
1031extern const struct ethtool_ops qeth_ethtool_ops;
1032extern const struct attribute_group *qeth_dev_groups[];
1033
1034const char *qeth_get_cardname_short(struct qeth_card *);
1035int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
1036int qeth_setup_discipline(struct qeth_card *card, enum qeth_discipline_id disc);
1037void qeth_remove_discipline(struct qeth_card *card);
1038
1039/* exports for qeth discipline device drivers */
1040extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
1041
1042struct net_device *qeth_clone_netdev(struct net_device *orig);
1043void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
1044			      int clear_start_mask);
1045int qeth_threads_running(struct qeth_card *, unsigned long);
1046int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
1047		     bool resetting);
1048
1049int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
1050		  int (*reply_cb)
1051		  (struct qeth_card *, struct qeth_reply *, unsigned long),
1052		  void *);
1053struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
1054					   enum qeth_ipa_cmds cmd_code,
1055					   enum qeth_prot_versions prot,
1056					   unsigned int data_length);
1057struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
1058						 enum qeth_ipa_funcs ipa_func,
1059						 u16 cmd_code,
1060						 unsigned int data_length,
1061						 enum qeth_prot_versions prot);
1062struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
1063					  enum qeth_diags_cmds sub_cmd,
1064					  unsigned int data_length);
1065
1066int qeth_schedule_recovery(struct qeth_card *card);
1067int qeth_poll(struct napi_struct *napi, int budget);
1068void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
1069int qeth_setadpparms_change_macaddr(struct qeth_card *);
1070void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
1071int qeth_query_switch_attributes(struct qeth_card *card,
1072				  struct qeth_switch_info *sw_info);
1073int qeth_query_card_info(struct qeth_card *card,
1074			 struct qeth_link_info *link_info);
1075int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
1076				     enum qeth_ipa_isolation_modes mode);
1077
1078int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
1079int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
1080			void __user *data, int cmd);
1081__printf(3, 4)
1082void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
1083int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
1084int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
1085int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
1086int qeth_set_features(struct net_device *, netdev_features_t);
1087void qeth_enable_hw_features(struct net_device *dev);
1088netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
1089netdev_features_t qeth_features_check(struct sk_buff *skb,
1090				      struct net_device *dev,
1091				      netdev_features_t features);
1092void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
1093int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
1094u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
1095			  u8 cast_type, struct net_device *sb_dev);
1096u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
1097			  struct net_device *sb_dev);
1098int qeth_open(struct net_device *dev);
1099int qeth_stop(struct net_device *dev);
1100
1101int qeth_vm_request_mac(struct qeth_card *card);
1102int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
1103	      struct qeth_qdio_out_q *queue, __be16 proto,
1104	      void (*fill_header)(struct qeth_qdio_out_q *queue,
1105				  struct qeth_hdr *hdr, struct sk_buff *skb,
1106				  __be16 proto, unsigned int data_len));
1107
1108#endif /* __QETH_CORE_H__ */
v4.10.11
   1/*
   2 *    Copyright IBM Corp. 2007
   3 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
   4 *		 Frank Pavlic <fpavlic@de.ibm.com>,
   5 *		 Thomas Spatzier <tspat@de.ibm.com>,
   6 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
   7 */
   8
   9#ifndef __QETH_CORE_H__
  10#define __QETH_CORE_H__
  11
  12#include <linux/if.h>
  13#include <linux/if_arp.h>
  14#include <linux/etherdevice.h>
  15#include <linux/if_vlan.h>
  16#include <linux/ctype.h>
  17#include <linux/in6.h>
  18#include <linux/bitops.h>
  19#include <linux/seq_file.h>
  20#include <linux/ethtool.h>
  21#include <linux/hashtable.h>
  22#include <linux/ip.h>
  23
  24#include <net/ipv6.h>
  25#include <net/if_inet6.h>
  26#include <net/addrconf.h>
  27
  28#include <asm/debug.h>
  29#include <asm/qdio.h>
  30#include <asm/ccwdev.h>
  31#include <asm/ccwgroup.h>
  32#include <asm/sysinfo.h>
  33
  34#include "qeth_core_mpc.h"
  35
  36/**
  37 * Debug Facility stuff
  38 */
  39enum qeth_dbf_names {
  40	QETH_DBF_SETUP,
  41	QETH_DBF_MSG,
  42	QETH_DBF_CTRL,
  43	QETH_DBF_INFOS	/* must be last element */
  44};
  45
  46struct qeth_dbf_info {
  47	char name[DEBUG_MAX_NAME_LEN];
  48	int pages;
  49	int areas;
  50	int len;
  51	int level;
  52	struct debug_view *view;
  53	debug_info_t *id;
  54};
  55
  56#define QETH_DBF_CTRL_LEN 256
  57
  58#define QETH_DBF_TEXT(name, level, text) \
  59	debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text)
  60
  61#define QETH_DBF_HEX(name, level, addr, len) \
  62	debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len)
  63
  64#define QETH_DBF_MESSAGE(level, text...) \
  65	debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
  66
  67#define QETH_DBF_TEXT_(name, level, text...) \
  68	qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text)
  69
  70#define QETH_CARD_TEXT(card, level, text) \
  71	debug_text_event(card->debug, level, text)
  72
  73#define QETH_CARD_HEX(card, level, addr, len) \
  74	debug_event(card->debug, level, (void *)(addr), len)
  75
  76#define QETH_CARD_MESSAGE(card, text...) \
  77	debug_sprintf_event(card->debug, level, text)
  78
  79#define QETH_CARD_TEXT_(card, level, text...) \
  80	qeth_dbf_longtext(card->debug, level, text)
  81
  82#define SENSE_COMMAND_REJECT_BYTE 0
  83#define SENSE_COMMAND_REJECT_FLAG 0x80
  84#define SENSE_RESETTING_EVENT_BYTE 1
  85#define SENSE_RESETTING_EVENT_FLAG 0x80
  86
  87/*
  88 * Common IO related definitions
  89 */
  90#define CARD_RDEV(card) card->read.ccwdev
  91#define CARD_WDEV(card) card->write.ccwdev
  92#define CARD_DDEV(card) card->data.ccwdev
  93#define CARD_BUS_ID(card) dev_name(&card->gdev->dev)
  94#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
  95#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
  96#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
  97#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
  98
  99/**
 100 * card stuff
 101 */
 102struct qeth_perf_stats {
 103	unsigned int bufs_rec;
 104	unsigned int bufs_sent;
 105
 106	unsigned int skbs_sent_pack;
 107	unsigned int bufs_sent_pack;
 108
 109	unsigned int sc_dp_p;
 110	unsigned int sc_p_dp;
 111	/* qdio_cq_handler: number of times called, time spent in */
 112	__u64 cq_start_time;
 113	unsigned int cq_cnt;
 114	unsigned int cq_time;
 115	/* qdio_input_handler: number of times called, time spent in */
 116	__u64 inbound_start_time;
 117	unsigned int inbound_cnt;
 118	unsigned int inbound_time;
 119	/* qeth_send_packet: number of times called, time spent in */
 120	__u64 outbound_start_time;
 121	unsigned int outbound_cnt;
 122	unsigned int outbound_time;
 123	/* qdio_output_handler: number of times called, time spent in */
 124	__u64 outbound_handler_start_time;
 125	unsigned int outbound_handler_cnt;
 126	unsigned int outbound_handler_time;
 127	/* number of calls to and time spent in do_QDIO for inbound queue */
 128	__u64 inbound_do_qdio_start_time;
 129	unsigned int inbound_do_qdio_cnt;
 130	unsigned int inbound_do_qdio_time;
 131	/* number of calls to and time spent in do_QDIO for outbound queues */
 132	__u64 outbound_do_qdio_start_time;
 133	unsigned int outbound_do_qdio_cnt;
 134	unsigned int outbound_do_qdio_time;
 135	unsigned int large_send_bytes;
 136	unsigned int large_send_cnt;
 137	unsigned int sg_skbs_sent;
 138	unsigned int sg_frags_sent;
 139	/* initial values when measuring starts */
 140	unsigned long initial_rx_packets;
 141	unsigned long initial_tx_packets;
 142	/* inbound scatter gather data */
 143	unsigned int sg_skbs_rx;
 144	unsigned int sg_frags_rx;
 145	unsigned int sg_alloc_page_rx;
 146	unsigned int tx_csum;
 147	unsigned int tx_lin;
 148	unsigned int tx_linfail;
 149};
 150
 151/* Routing stuff */
 152struct qeth_routing_info {
 153	enum qeth_routing_types type;
 154};
 155
 156/* IPA stuff */
 157struct qeth_ipa_info {
 158	__u32 supported_funcs;
 159	__u32 enabled_funcs;
 160};
 161
 162/* SETBRIDGEPORT stuff */
 163enum qeth_sbp_roles {
 164	QETH_SBP_ROLE_NONE	= 0,
 165	QETH_SBP_ROLE_PRIMARY	= 1,
 166	QETH_SBP_ROLE_SECONDARY	= 2,
 167};
 168
 169enum qeth_sbp_states {
 170	QETH_SBP_STATE_INACTIVE	= 0,
 171	QETH_SBP_STATE_STANDBY	= 1,
 172	QETH_SBP_STATE_ACTIVE	= 2,
 173};
 174
 175#define QETH_SBP_HOST_NOTIFICATION 1
 176
 177struct qeth_sbp_info {
 178	__u32 supported_funcs;
 179	enum qeth_sbp_roles role;
 180	__u32 hostnotification:1;
 181	__u32 reflect_promisc:1;
 182	__u32 reflect_promisc_primary:1;
 183};
 184
 185static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
 186		enum qeth_ipa_funcs func)
 187{
 188	return (ipa->supported_funcs & func);
 189}
 190
 191static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
 192		enum qeth_ipa_funcs func)
 193{
 194	return (ipa->supported_funcs & ipa->enabled_funcs & func);
 195}
 196
 197#define qeth_adp_supported(c, f) \
 198	qeth_is_ipa_supported(&c->options.adp, f)
 199#define qeth_adp_enabled(c, f) \
 200	qeth_is_ipa_enabled(&c->options.adp, f)
 201#define qeth_is_supported(c, f) \
 202	qeth_is_ipa_supported(&c->options.ipa4, f)
 203#define qeth_is_enabled(c, f) \
 204	qeth_is_ipa_enabled(&c->options.ipa4, f)
 205#define qeth_is_supported6(c, f) \
 206	qeth_is_ipa_supported(&c->options.ipa6, f)
 207#define qeth_is_enabled6(c, f) \
 208	qeth_is_ipa_enabled(&c->options.ipa6, f)
 209#define qeth_is_ipafunc_supported(c, prot, f) \
 210	 ((prot == QETH_PROT_IPV6) ? \
 211		qeth_is_supported6(c, f) : qeth_is_supported(c, f))
 212#define qeth_is_ipafunc_enabled(c, prot, f) \
 213	 ((prot == QETH_PROT_IPV6) ? \
 214		qeth_is_enabled6(c, f) : qeth_is_enabled(c, f))
 215
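Hypothetical example of the capability-check macros above; IPA_OUTBOUND_CHECKSUM is assumed to be one of the enum qeth_ipa_funcs bits from qeth_core_mpc.h:

static inline int example_can_offload_tx_csum(struct qeth_card *card)
{
	return qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM) &&
	       qeth_is_enabled(card, IPA_OUTBOUND_CHECKSUM);
}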
 216#define QETH_IDX_FUNC_LEVEL_OSD		 0x0101
 217#define QETH_IDX_FUNC_LEVEL_IQD		 0x4108
 218
 219#define QETH_MODELLIST_ARRAY \
 220	{{0x1731, 0x01, 0x1732, QETH_CARD_TYPE_OSD, QETH_MAX_QUEUES, 0}, \
 221	 {0x1731, 0x05, 0x1732, QETH_CARD_TYPE_IQD, QETH_MAX_QUEUES, 0x103}, \
 222	 {0x1731, 0x06, 0x1732, QETH_CARD_TYPE_OSN, QETH_MAX_QUEUES, 0}, \
 223	 {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSM, QETH_MAX_QUEUES, 0}, \
 224	 {0x1731, 0x02, 0x1732, QETH_CARD_TYPE_OSX, QETH_MAX_QUEUES, 0}, \
 225	 {0, 0, 0, 0, 0, 0} }
 226#define QETH_CU_TYPE_IND	0
 227#define QETH_CU_MODEL_IND	1
 228#define QETH_DEV_TYPE_IND	2
 229#define QETH_DEV_MODEL_IND	3
 230#define QETH_QUEUE_NO_IND	4
 231#define QETH_MULTICAST_IND	5
 232
 233#define QETH_REAL_CARD		1
 234#define QETH_VLAN_CARD		2
 235#define QETH_BUFSIZE		4096
 236
 237/**
 238 * some more defs
 239 */
 240#define QETH_TX_TIMEOUT		100 * HZ
 241#define QETH_RCD_TIMEOUT	60 * HZ
 242#define QETH_RECLAIM_WORK_TIME	HZ
 243#define QETH_HEADER_SIZE	32
 244#define QETH_MAX_PORTNO		15
 245
 246/*IPv6 address autoconfiguration stuff*/
 247#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe
 248#define UNIQUE_ID_NOT_BY_CARD		0x10000
 249
 250/*****************************************************************************/
 251/* QDIO queue and buffer handling                                            */
 252/*****************************************************************************/
 253#define QETH_MAX_QUEUES 4
 254#define QETH_IN_BUF_SIZE_DEFAULT 65536
 255#define QETH_IN_BUF_COUNT_DEFAULT 64
 256#define QETH_IN_BUF_COUNT_HSDEFAULT 128
 257#define QETH_IN_BUF_COUNT_MIN 8
 258#define QETH_IN_BUF_COUNT_MAX 128
 259#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
 260#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
 261		 ((card)->qdio.in_buf_pool.buf_count / 2)
 262
 263/* buffers we have to be behind before we get a PCI */
 264#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1)
 265/*enqueued free buffers left before we get a PCI*/
 266#define QETH_PCI_THRESHOLD_B(card) 0
 267/*not used unless the microcode gets patched*/
 268#define QETH_PCI_TIMER_VALUE(card) 3
 269
  270/* priority queueing */
 271#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING
 272#define QETH_DEFAULT_QUEUE    2
 273#define QETH_NO_PRIO_QUEUEING 0
 274#define QETH_PRIO_Q_ING_PREC  1
 275#define QETH_PRIO_Q_ING_TOS   2
 276#define QETH_PRIO_Q_ING_SKB   3
 277#define QETH_PRIO_Q_ING_VLAN  4
 278
 279/* Packing */
 280#define QETH_LOW_WATERMARK_PACK  2
 281#define QETH_HIGH_WATERMARK_PACK 5
 282#define QETH_WATERMARK_PACK_FUZZ 1
 283
 284#define QETH_IP_HEADER_SIZE 40
 285
 286/* large receive scatter gather copy break */
 287#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
 288#define QETH_RX_PULL_LEN 256
 289
 290struct qeth_hdr_layer3 {
 291	__u8  id;
 292	__u8  flags;
 293	__u16 inbound_checksum; /*TSO:__u16 seqno */
 294	__u32 token;		/*TSO: __u32 reserved */
 295	__u16 length;
 296	__u8  vlan_prio;
 297	__u8  ext_flags;
 298	__u16 vlan_id;
 299	__u16 frame_offset;
 300	__u8  dest_addr[16];
 301} __attribute__ ((packed));
 302
 303struct qeth_hdr_layer2 {
 304	__u8 id;
 305	__u8 flags[3];
 306	__u8 port_no;
 307	__u8 hdr_length;
 308	__u16 pkt_length;
 309	__u16 seq_no;
 310	__u16 vlan_id;
 311	__u32 reserved;
 312	__u8 reserved2[16];
 313} __attribute__ ((packed));
 314
 315struct qeth_hdr_osn {
 316	__u8 id;
 317	__u8 reserved;
 318	__u16 seq_no;
 319	__u16 reserved2;
 320	__u16 control_flags;
 321	__u16 pdu_length;
 322	__u8 reserved3[18];
 323	__u32 ccid;
 324} __attribute__ ((packed));
 325
 326struct qeth_hdr {
 327	union {
 328		struct qeth_hdr_layer2 l2;
 329		struct qeth_hdr_layer3 l3;
 330		struct qeth_hdr_osn    osn;
 331	} hdr;
 332} __attribute__ ((packed));
 333
 334/*TCP Segmentation Offload header*/
 335struct qeth_hdr_ext_tso {
 336	__u16 hdr_tot_len;
 337	__u8  imb_hdr_no;
 338	__u8  reserved;
 339	__u8  hdr_type;
 340	__u8  hdr_version;
 341	__u16 hdr_len;
 342	__u32 payload_len;
 343	__u16 mss;
 344	__u16 dg_hdr_len;
 345	__u8  padding[16];
 346} __attribute__ ((packed));
 347
 348struct qeth_hdr_tso {
 349	struct qeth_hdr hdr;	/*hdr->hdr.l3.xxx*/
 350	struct qeth_hdr_ext_tso ext;
 351} __attribute__ ((packed));
 352
 353
 354/* flags for qeth_hdr.flags */
 355#define QETH_HDR_PASSTHRU 0x10
 356#define QETH_HDR_IPV6     0x80
 357#define QETH_HDR_CAST_MASK 0x07
 358enum qeth_cast_flags {
 359	QETH_CAST_UNICAST   = 0x06,
 360	QETH_CAST_MULTICAST = 0x04,
 361	QETH_CAST_BROADCAST = 0x05,
 362	QETH_CAST_ANYCAST   = 0x07,
 363	QETH_CAST_NOCAST    = 0x00,
 364};
 365
 366enum qeth_layer2_frame_flags {
 367	QETH_LAYER2_FLAG_MULTICAST = 0x01,
 368	QETH_LAYER2_FLAG_BROADCAST = 0x02,
 369	QETH_LAYER2_FLAG_UNICAST   = 0x04,
 370	QETH_LAYER2_FLAG_VLAN      = 0x10,
 371};
 372
 373enum qeth_header_ids {
 374	QETH_HEADER_TYPE_LAYER3 = 0x01,
 375	QETH_HEADER_TYPE_LAYER2 = 0x02,
 376	QETH_HEADER_TYPE_TSO	= 0x03,
 377	QETH_HEADER_TYPE_OSN    = 0x04,
 378};
 379/* flags for qeth_hdr.ext_flags */
 380#define QETH_HDR_EXT_VLAN_FRAME       0x01
 381#define QETH_HDR_EXT_TOKEN_ID         0x02
 382#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04
 383#define QETH_HDR_EXT_SRC_MAC_ADDR     0x08
 384#define QETH_HDR_EXT_CSUM_HDR_REQ     0x10
 385#define QETH_HDR_EXT_CSUM_TRANSP_REQ  0x20
 386#define QETH_HDR_EXT_UDP	      0x40 /*bit off for TCP*/
 387
 388enum qeth_qdio_buffer_states {
 389	/*
 390	 * inbound: read out by driver; owned by hardware in order to be filled
 391	 * outbound: owned by driver in order to be filled
 392	 */
 393	QETH_QDIO_BUF_EMPTY,
 394	/*
 395	 * inbound: filled by hardware; owned by driver in order to be read out
 396	 * outbound: filled by driver; owned by hardware in order to be sent
 397	 */
 398	QETH_QDIO_BUF_PRIMED,
 399	/*
 400	 * inbound: not applicable
 401	 * outbound: identified to be pending in TPQ
 402	 */
 403	QETH_QDIO_BUF_PENDING,
 404	/*
 405	 * inbound: not applicable
 406	 * outbound: found in completion queue
 407	 */
 408	QETH_QDIO_BUF_IN_CQ,
 409	/*
 410	 * inbound: not applicable
 411	 * outbound: handled via transfer pending / completion queue
 412	 */
 413	QETH_QDIO_BUF_HANDLED_DELAYED,
 414};
 415
 416enum qeth_qdio_info_states {
 417	QETH_QDIO_UNINITIALIZED,
 418	QETH_QDIO_ALLOCATED,
 419	QETH_QDIO_ESTABLISHED,
 420	QETH_QDIO_CLEANING
 421};
 422
 423struct qeth_buffer_pool_entry {
 424	struct list_head list;
 425	struct list_head init_list;
 426	void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
 427};
 428
 429struct qeth_qdio_buffer_pool {
 430	struct list_head entry_list;
 431	int buf_count;
 432};
 433
 434struct qeth_qdio_buffer {
 435	struct qdio_buffer *buffer;
 436	/* the buffer pool entry currently associated to this buffer */
 437	struct qeth_buffer_pool_entry *pool_entry;
 438	struct sk_buff *rx_skb;
 439};
 440
 441struct qeth_qdio_q {
 442	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
 443	struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
 444	int next_buf_to_init;
 445};
 446
 447struct qeth_qdio_out_buffer {
 448	struct qdio_buffer *buffer;
 449	atomic_t state;
 450	int next_element_to_fill;
 451	struct sk_buff_head skb_list;
 452	int is_header[16];
 453
 454	struct qaob *aob;
 455	struct qeth_qdio_out_q *q;
 456	struct qeth_qdio_out_buffer *next_pending;
 457};
 458
 459struct qeth_card;
 460
 461enum qeth_out_q_states {
 462       QETH_OUT_Q_UNLOCKED,
 463       QETH_OUT_Q_LOCKED,
 464       QETH_OUT_Q_LOCKED_FLUSH,
 465};
 466
 467struct qeth_qdio_out_q {
 468	struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
 469	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
 470	struct qdio_outbuf_state *bufstates; /* convenience pointer */
 471	int queue_no;
 472	struct qeth_card *card;
 473	atomic_t state;
 474	int do_pack;
 475	/*
 476	 * index of buffer to be filled by driver; state EMPTY or PACKING
 477	 */
 478	int next_buf_to_fill;
 479	/*
 480	 * number of buffers that are currently filled (PRIMED)
 481	 * -> these buffers are hardware-owned
 482	 */
 483	atomic_t used_buffers;
 484	/* indicates whether PCI flag must be set (or if one is outstanding) */
 485	atomic_t set_pci_flags_count;
 486};
 487
 488struct qeth_qdio_info {
 489	atomic_t state;
 490	/* input */
 491	int no_in_queues;
 492	struct qeth_qdio_q *in_q;
 493	struct qeth_qdio_q *c_q;
 494	struct qeth_qdio_buffer_pool in_buf_pool;
 495	struct qeth_qdio_buffer_pool init_pool;
 496	int in_buf_size;
 497
 498	/* output */
 499	int no_out_queues;
 500	struct qeth_qdio_out_q **out_qs;
 501	struct qdio_outbuf_state *out_bufstates;
 502
 503	/* priority queueing */
 504	int do_prio_queueing;
 505	int default_out_queue;
 506};
 507
 508enum qeth_send_errors {
 509	QETH_SEND_ERROR_NONE,
 510	QETH_SEND_ERROR_LINK_FAILURE,
 511	QETH_SEND_ERROR_RETRY,
 512	QETH_SEND_ERROR_KICK_IT,
 513};
 514
 515#define QETH_ETH_MAC_V4      0x0100 /* like v4 */
 516#define QETH_ETH_MAC_V6      0x3333 /* like v6 */
 517/* tr mc mac is longer, but that will be enough to detect mc frames */
 518#define QETH_TR_MAC_NC       0xc000 /* non-canonical */
 519#define QETH_TR_MAC_C        0x0300 /* canonical */
 520
 521#define DEFAULT_ADD_HHLEN 0
 522#define MAX_ADD_HHLEN 1024
 523
 524/**
 525 * buffer stuff for read channel
 526 */
 527#define QETH_CMD_BUFFER_NO	8
 528
 529/**
 530 *  channel state machine
 531 */
 532enum qeth_channel_states {
 533	CH_STATE_UP,
 534	CH_STATE_DOWN,
 535	CH_STATE_ACTIVATING,
 536	CH_STATE_HALTED,
 537	CH_STATE_STOPPED,
 538	CH_STATE_RCD,
 539	CH_STATE_RCD_DONE,
 540};
 541/**
 542 * card state machine
 543 */
 544enum qeth_card_states {
 545	CARD_STATE_DOWN,
 546	CARD_STATE_HARDSETUP,
 547	CARD_STATE_SOFTSETUP,
 548	CARD_STATE_UP,
 549	CARD_STATE_RECOVER,
 550};
 551
 552/**
 553 * Protocol versions
 554 */
 555enum qeth_prot_versions {
 556	QETH_PROT_IPV4 = 0x0004,
 557	QETH_PROT_IPV6 = 0x0006,
 558};
 559
 560enum qeth_ip_types {
 561	QETH_IP_TYPE_NORMAL,
 562	QETH_IP_TYPE_VIPA,
 563	QETH_IP_TYPE_RXIP,
 564};
 565
 566enum qeth_cmd_buffer_state {
 567	BUF_STATE_FREE,
 568	BUF_STATE_LOCKED,
 569	BUF_STATE_PROCESSED,
 570};
 571
 572enum qeth_cq {
 573	QETH_CQ_DISABLED = 0,
 574	QETH_CQ_ENABLED = 1,
 575	QETH_CQ_NOTAVAILABLE = 2,
 576};
 577
 578struct qeth_ipato {
 579	int enabled;
 580	int invert4;
 581	int invert6;
 582	struct list_head entries;
 583};
 584
 585struct qeth_channel;
 586
 587struct qeth_cmd_buffer {
 588	enum qeth_cmd_buffer_state state;
 589	struct qeth_channel *channel;
 590	unsigned char *data;
 591	int rc;
 592	void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
 593};
 594
 595/**
 596 * definition of a qeth channel, used for read and write
 597 */
 598struct qeth_channel {
 599	enum qeth_channel_states state;
 600	struct ccw1 ccw;
 601	spinlock_t iob_lock;
 602	wait_queue_head_t wait_q;
 603	struct ccw_device *ccwdev;
 604/*command buffer for control data*/
 605	struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO];
 606	atomic_t irq_pending;
 607	int io_buf_no;
 608	int buf_no;
 609};
 610
 611/**
 612 *  OSA card related definitions
 613 */
 614struct qeth_token {
 615	__u32 issuer_rm_w;
 616	__u32 issuer_rm_r;
 617	__u32 cm_filter_w;
 618	__u32 cm_filter_r;
 619	__u32 cm_connection_w;
 620	__u32 cm_connection_r;
 621	__u32 ulp_filter_w;
 622	__u32 ulp_filter_r;
 623	__u32 ulp_connection_w;
 624	__u32 ulp_connection_r;
 625};
 626
 627struct qeth_seqno {
 628	__u32 trans_hdr;
 629	__u32 pdu_hdr;
 630	__u32 pdu_hdr_ack;
 631	__u16 ipa;
 632	__u32 pkt_seqno;
 633};
 634
 635struct qeth_reply {
 636	struct list_head list;
 637	wait_queue_head_t wait_q;
 638	int (*callback)(struct qeth_card *, struct qeth_reply *,
 639		unsigned long);
 640	u32 seqno;
 641	unsigned long offset;
 642	atomic_t received;
 643	int rc;
 644	void *param;
 645	struct qeth_card *card;
 646	atomic_t refcnt;
 647};
 648
 649
 650struct qeth_card_blkt {
 651	int time_total;
 652	int inter_packet;
 653	int inter_packet_jumbo;
 654};
 655
 656#define QETH_BROADCAST_WITH_ECHO    0x01
 657#define QETH_BROADCAST_WITHOUT_ECHO 0x02
 658#define QETH_LAYER2_MAC_READ	    0x01
 659#define QETH_LAYER2_MAC_REGISTERED  0x02
 660struct qeth_card_info {
 661	unsigned short unit_addr2;
 662	unsigned short cula;
 663	unsigned short chpid;
 664	__u16 func_level;
 665	char mcl_level[QETH_MCL_LENGTH + 1];
 666	int guestlan;
 667	int mac_bits;
 668	int portno;
 669	enum qeth_card_types type;
 670	enum qeth_link_types link_type;
 671	int is_multicast_different;
 672	int initial_mtu;
 673	int max_mtu;
 674	int broadcast_capable;
 675	int unique_id;
 676	struct qeth_card_blkt blkt;
 677	__u32 csum_mask;
 678	__u32 tx_csum_mask;
 679	enum qeth_ipa_promisc_modes promisc_mode;
 680	__u32 diagass_support;
 681	__u32 hwtrap;
 682};
 683
 684struct qeth_card_options {
 685	struct qeth_routing_info route4;
 686	struct qeth_ipa_info ipa4;
 687	struct qeth_ipa_info adp; /*Adapter parameters*/
 688	struct qeth_routing_info route6;
 689	struct qeth_ipa_info ipa6;
 690	struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */
 691	int fake_broadcast;
 692	int add_hhlen;
 693	int layer2;
 694	int performance_stats;
 695	int rx_sg_cb;
 696	enum qeth_ipa_isolation_modes isolation;
 697	enum qeth_ipa_isolation_modes prev_isolation;
 698	int sniffer;
 699	enum qeth_cq cq;
 700	char hsuid[9];
 701};
 702
 703/*
 704 * thread bits for qeth_card thread masks
 705 */
 706enum qeth_threads {
 707	QETH_RECOVER_THREAD = 1,
 708};
 709
 710struct qeth_osn_info {
 711	int (*assist_cb)(struct net_device *dev, void *data);
 712	int (*data_cb)(struct sk_buff *skb);
 713};
 714
 715enum qeth_discipline_id {
 716	QETH_DISCIPLINE_LAYER3 = 0,
 717	QETH_DISCIPLINE_LAYER2 = 1,
 718};
 719
 720struct qeth_discipline {
 721	void (*start_poll)(struct ccw_device *, int, unsigned long);
 722	qdio_handler_t *input_handler;
 723	qdio_handler_t *output_handler;
 724	int (*recover)(void *ptr);
 725	int (*setup) (struct ccwgroup_device *);
 726	void (*remove) (struct ccwgroup_device *);
 727	int (*set_online) (struct ccwgroup_device *);
 728	int (*set_offline) (struct ccwgroup_device *);
 729	void (*shutdown)(struct ccwgroup_device *);
 730	int (*prepare) (struct ccwgroup_device *);
 731	void (*complete) (struct ccwgroup_device *);
 732	int (*freeze)(struct ccwgroup_device *);
 733	int (*thaw) (struct ccwgroup_device *);
 734	int (*restore)(struct ccwgroup_device *);
 735	int (*control_event_handler)(struct qeth_card *card,
 736					struct qeth_ipa_cmd *cmd);
 737};
 738
 739struct qeth_vlan_vid {
 740	struct list_head list;
 741	unsigned short vid;
 742};
 743
 744enum qeth_addr_disposition {
 745	QETH_DISP_ADDR_DELETE = 0,
 746	QETH_DISP_ADDR_DO_NOTHING = 1,
 747	QETH_DISP_ADDR_ADD = 2,
 748};
 749
 750struct qeth_rx {
 751	int b_count;
 752	int b_index;
 753	struct qdio_buffer_element *b_element;
 754	int e_offset;
 755	int qdio_err;
 756};
 757
 758struct carrier_info {
 759	__u8  card_type;
 760	__u16 port_mode;
 761	__u32 port_speed;
 762};
 763
 764struct qeth_switch_info {
 765	__u32 capabilities;
 766	__u32 settings;
 767};
 768
 769#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
 770
 771struct qeth_card {
 772	struct list_head list;
 773	enum qeth_card_states state;
 774	int lan_online;
 775	spinlock_t lock;
 776	struct ccwgroup_device *gdev;
 777	struct qeth_channel read;
 778	struct qeth_channel write;
 779	struct qeth_channel data;
 780
 781	struct net_device *dev;
 782	struct net_device_stats stats;
 783
 784	struct qeth_card_info info;
 785	struct qeth_token token;
 786	struct qeth_seqno seqno;
 787	struct qeth_card_options options;
 788
 789	wait_queue_head_t wait_q;
 790	spinlock_t vlanlock;
 791	spinlock_t mclock;
 792	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 793	struct list_head vid_list;
 794	DECLARE_HASHTABLE(mac_htable, 4);
 795	DECLARE_HASHTABLE(ip_htable, 4);
 796	DECLARE_HASHTABLE(ip_mc_htable, 4);
 797	struct work_struct kernel_thread_starter;
 798	spinlock_t thread_mask_lock;
 799	unsigned long thread_start_mask;
 800	unsigned long thread_allowed_mask;
 801	unsigned long thread_running_mask;
 802	struct task_struct *recovery_task;
 803	spinlock_t ip_lock;
 804	struct qeth_ipato ipato;
 805	struct list_head cmd_waiter_list;
 806	/* QDIO buffer handling */
 807	struct qeth_qdio_info qdio;
 808	struct qeth_perf_stats perf_stats;
 809	int read_or_write_problem;
 810	struct qeth_osn_info osn_info;
 811	struct qeth_discipline *discipline;
 812	atomic_t force_alloc_skb;
 813	struct service_level qeth_service_level;
 814	struct qdio_ssqd_desc ssqd;
 815	debug_info_t *debug;
 816	struct mutex conf_mutex;
 817	struct mutex discipline_mutex;
 818	struct napi_struct napi;
 819	struct qeth_rx rx;
 820	struct delayed_work buffer_reclaim_work;
 821	int reclaim_index;
 822	struct work_struct close_dev_work;
 823};
 824
 825struct qeth_card_list_struct {
 826	struct list_head list;
 827	rwlock_t rwlock;
 828};
 829
 830struct qeth_trap_id {
 831	__u16 lparnr;
 832	char vmname[8];
 833	__u8 chpid;
 834	__u8 ssid;
 835	__u16 devno;
 836} __packed;
 837
  838/* some helper functions */
  839#define QETH_CARD_IFNAME(card) ((card)->dev ? (card)->dev->name : "")
 840
 841/**
 842 * qeth_get_elements_for_range() -	find number of SBALEs to cover range.
 843 * @start:				Start of the address range.
 844 * @end:				Address after the end of the range.
 845 *
 846 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
 847 * the specified address range.
 848 */
 849static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
 850{
 851	return PFN_UP(end - 1) - PFN_DOWN(start);
 852}
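     /*
      * Worked example (illustrative, assuming 4 KiB pages): for start = 0x0fce
      * and end = 0x1032, the 100-byte range straddles one page boundary, so
      * PFN_UP(end - 1) - PFN_DOWN(start) = 2 - 0 = 2 buffer elements are needed.
      */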
 853
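     /*
      * Bit 51 of the s390 TOD clock ticks once per microsecond, so shifting
      * the clock value right by 12 bits yields a (truncated) microsecond count.
      */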
 854static inline int qeth_get_micros(void)
 855{
 856	return (int) (get_tod_clock() >> 12);
 857}
 858
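     /*
      * Determine the IP version of a frame by peeking at the Ethernet
      * protocol field; for a VLAN-tagged frame, advancing the __be16 pointer
      * by two (i.e. 4 bytes) skips the VLAN TCI and reads the encapsulated
      * protocol instead.
      */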
 859static inline int qeth_get_ip_version(struct sk_buff *skb)
 860{
 861	__be16 *p = &((struct ethhdr *)skb->data)->h_proto;
 862
  863	if (*p == htons(ETH_P_8021Q))
  864		p += 2;
  865	switch (*p) {
  866	case htons(ETH_P_IPV6):
  867		return 6;
  868	case htons(ETH_P_IP):
  869		return 4;
 870	default:
 871		return 0;
 872	}
 873}
 874
 875static inline int qeth_get_ip_protocol(struct sk_buff *skb)
 876{
 877	return ip_hdr(skb)->protocol;
 878}
 879
 880static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
 881		struct qeth_buffer_pool_entry *entry)
 882{
 883	list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
 884}
 885
 886static inline int qeth_is_diagass_supported(struct qeth_card *card,
 887		enum qeth_diags_cmds cmd)
 888{
 889	return card->info.diagass_support & (__u32)cmd;
 890}
 891
 892extern struct qeth_discipline qeth_l2_discipline;
 893extern struct qeth_discipline qeth_l3_discipline;
 894extern const struct attribute_group *qeth_generic_attr_groups[];
 895extern const struct attribute_group *qeth_osn_attr_groups[];
 896extern struct workqueue_struct *qeth_wq;
 897
 898int qeth_card_hw_is_reachable(struct qeth_card *);
 899const char *qeth_get_cardname_short(struct qeth_card *);
 900int qeth_realloc_buffer_pool(struct qeth_card *, int);
 901int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 902void qeth_core_free_discipline(struct qeth_card *);
 903
 904/* exports for qeth discipline device drivers */
 905extern struct qeth_card_list_struct qeth_core_card_list;
 906extern struct kmem_cache *qeth_core_header_cache;
 907extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
 908
 909void qeth_set_recovery_task(struct qeth_card *);
 910void qeth_clear_recovery_task(struct qeth_card *);
  911void qeth_set_allowed_threads(struct qeth_card *, unsigned long, int);
 912int qeth_threads_running(struct qeth_card *, unsigned long);
 913int qeth_wait_for_threads(struct qeth_card *, unsigned long);
 914int qeth_do_run_thread(struct qeth_card *, unsigned long);
 915void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
 916void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
 917int qeth_core_hardsetup_card(struct qeth_card *);
 918void qeth_print_status_message(struct qeth_card *);
 919int qeth_init_qdio_queues(struct qeth_card *);
 920int qeth_send_startlan(struct qeth_card *);
 921int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
 922		  int (*reply_cb)
 923		  (struct qeth_card *, struct qeth_reply *, unsigned long),
 924		  void *);
 925struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
 926			enum qeth_ipa_cmds, enum qeth_prot_versions);
 927int qeth_query_setadapterparms(struct qeth_card *);
 928int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
 929		unsigned int, const char *);
 930void qeth_queue_input_buffer(struct qeth_card *, int);
 931struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
 932		struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *,
 933		struct qeth_hdr **);
 934void qeth_schedule_recovery(struct qeth_card *);
 935void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long);
 936void qeth_qdio_input_handler(struct ccw_device *,
 937		unsigned int, unsigned int, int,
 938		int, unsigned long);
 939void qeth_qdio_output_handler(struct ccw_device *, unsigned int,
 940			int, int, int, unsigned long);
 941void qeth_clear_ipacmd_list(struct qeth_card *);
 942int qeth_qdio_clear_card(struct qeth_card *, int);
 943void qeth_clear_working_pool_list(struct qeth_card *);
 944void qeth_clear_cmd_buffers(struct qeth_channel *);
 945void qeth_clear_qdio_buffers(struct qeth_card *);
 946void qeth_setadp_promisc_mode(struct qeth_card *);
 947struct net_device_stats *qeth_get_stats(struct net_device *);
 948int qeth_change_mtu(struct net_device *, int);
 949int qeth_setadpparms_change_macaddr(struct qeth_card *);
 950void qeth_tx_timeout(struct net_device *);
 951void qeth_prepare_control_data(struct qeth_card *, int,
 952				struct qeth_cmd_buffer *);
 953void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *);
 954void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
 955struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
 956int qeth_mdio_read(struct net_device *, int, int);
 957int qeth_snmp_command(struct qeth_card *, char __user *);
 958int qeth_query_oat_command(struct qeth_card *, char __user *);
 959int qeth_query_switch_attributes(struct qeth_card *card,
 960				  struct qeth_switch_info *sw_info);
 961int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
 962	int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long),
 963	void *reply_param);
 964int qeth_bridgeport_query_ports(struct qeth_card *card,
 965	enum qeth_sbp_roles *role, enum qeth_sbp_states *state);
 966int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role);
 967int qeth_bridgeport_an_set(struct qeth_card *card, int enable);
 968int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
 969int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int);
 970int qeth_get_elements_for_frags(struct sk_buff *);
 971int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
 972			struct sk_buff *, struct qeth_hdr *, int, int, int);
 973int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
 974		    struct sk_buff *, struct qeth_hdr *, int);
 975int qeth_core_get_sset_count(struct net_device *, int);
 976void qeth_core_get_ethtool_stats(struct net_device *,
 977				struct ethtool_stats *, u64 *);
 978void qeth_core_get_strings(struct net_device *, u32, u8 *);
 979void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
 980void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
 981int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
 982int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback);
 983int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int);
 984int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
 985int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
 986int qeth_query_ipassists(struct qeth_card *, enum qeth_prot_versions prot);
 987void qeth_trace_features(struct qeth_card *);
 988void qeth_close_dev(struct qeth_card *);
 989int qeth_send_simple_setassparms(struct qeth_card *, enum qeth_ipa_funcs,
 990				 __u16, long);
 991int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
 992			  long,
 993			  int (*reply_cb)(struct qeth_card *,
 994					  struct qeth_reply *, unsigned long),
 995			  void *);
 996int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
 997struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
 998						 enum qeth_ipa_funcs,
 999						 __u16, __u16,
1000						 enum qeth_prot_versions);
1001int qeth_set_features(struct net_device *, netdev_features_t);
1002int qeth_recover_features(struct net_device *);
1003netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
1004
1005/* exports for OSN */
1006int qeth_osn_assist(struct net_device *, void *, int);
1007int qeth_osn_register(unsigned char *read_dev_no, struct net_device **,
1008		int (*assist_cb)(struct net_device *, void *),
1009		int (*data_cb)(struct sk_buff *));
1010void qeth_osn_deregister(struct net_device *);
1011
1012#endif /* __QETH_CORE_H__ */