   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * IUCV network driver
   4 *
   5 * Copyright IBM Corp. 2001, 2009
   6 *
   7 * Author(s):
   8 *	Original netiucv driver:
   9 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  10 *	Sysfs integration and all bugs therein:
  11 *		Cornelia Huck (cornelia.huck@de.ibm.com)
  12 *	PM functions:
  13 *		Ursula Braun (ursula.braun@de.ibm.com)
  14 *
  15 * Documentation used:
  16 *  the source of the original IUCV driver by:
  17 *    Stefan Hegewald <hegewald@de.ibm.com>
  18 *    Hartmut Penner <hpenner@de.ibm.com>
  19 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  20 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  21 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
  22 */
  23
  24#define KMSG_COMPONENT "netiucv"
  25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  26
  27#undef DEBUG
  28
  29#include <linux/module.h>
  30#include <linux/init.h>
  31#include <linux/kernel.h>
  32#include <linux/slab.h>
  33#include <linux/errno.h>
  34#include <linux/types.h>
  35#include <linux/interrupt.h>
  36#include <linux/timer.h>
  37#include <linux/bitops.h>
  38
  39#include <linux/signal.h>
  40#include <linux/string.h>
  41#include <linux/device.h>
  42
  43#include <linux/ip.h>
  44#include <linux/if_arp.h>
  45#include <linux/tcp.h>
  46#include <linux/skbuff.h>
  47#include <linux/ctype.h>
  48#include <net/dst.h>
  49
  50#include <asm/io.h>
  51#include <linux/uaccess.h>
  52#include <asm/ebcdic.h>
  53
  54#include <net/iucv/iucv.h>
  55#include "fsm.h"
  56
  57MODULE_AUTHOR
  58    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
  59MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  60
  61/**
  62 * Debug Facility stuff
  63 */
  64#define IUCV_DBF_SETUP_NAME "iucv_setup"
  65#define IUCV_DBF_SETUP_LEN 64
  66#define IUCV_DBF_SETUP_PAGES 2
  67#define IUCV_DBF_SETUP_NR_AREAS 1
  68#define IUCV_DBF_SETUP_LEVEL 3
  69
  70#define IUCV_DBF_DATA_NAME "iucv_data"
  71#define IUCV_DBF_DATA_LEN 128
  72#define IUCV_DBF_DATA_PAGES 2
  73#define IUCV_DBF_DATA_NR_AREAS 1
  74#define IUCV_DBF_DATA_LEVEL 2
  75
  76#define IUCV_DBF_TRACE_NAME "iucv_trace"
  77#define IUCV_DBF_TRACE_LEN 16
  78#define IUCV_DBF_TRACE_PAGES 4
  79#define IUCV_DBF_TRACE_NR_AREAS 1
  80#define IUCV_DBF_TRACE_LEVEL 3
  81
  82#define IUCV_DBF_TEXT(name,level,text) \
  83	do { \
  84		debug_text_event(iucv_dbf_##name,level,text); \
  85	} while (0)
  86
  87#define IUCV_DBF_HEX(name,level,addr,len) \
  88	do { \
  89		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
  90	} while (0)
  91
  92DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
  93
  94#define IUCV_DBF_TEXT_(name, level, text...) \
  95	do { \
  96		if (debug_level_enabled(iucv_dbf_##name, level)) { \
  97			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
  98			sprintf(__buf, text); \
  99			debug_text_event(iucv_dbf_##name, level, __buf); \
 100			put_cpu_var(iucv_dbf_txt_buf); \
 101		} \
 102	} while (0)
 103
 104#define IUCV_DBF_SPRINTF(name,level,text...) \
 105	do { \
 106		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
 107		debug_sprintf_event(iucv_dbf_trace, level, text ); \
 108	} while (0)
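
/*
 * Illustrative sketch, not part of the driver source: typical call sites
 * for the macros defined above, mirroring how they are used further down
 * in this file (the iucv_dbf_* debug areas themselves are only created
 * later, in iucv_register_dbf_views()).
 */
static void example_dbf_usage(int rc)
{
	/* plain string into the "trace" area at level 4 */
	IUCV_DBF_TEXT(trace, 4, __func__);
	/* formatted text, built in the per-CPU buffer, into "setup" */
	IUCV_DBF_TEXT_(setup, 2, "iucv_connect rc is %d\n", rc);
	/* hex dump of arbitrary data into the "data" area */
	IUCV_DBF_HEX(data, 2, &rc, sizeof(rc));
}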
 109
 110/**
 111 * some more debug stuff
 112 */
 113#define PRINTK_HEADER " iucv: "       /* for debugging */
 114
 115static struct device_driver netiucv_driver = {
 116	.owner = THIS_MODULE,
 117	.name = "netiucv",
 118	.bus  = &iucv_bus,
 119};
 120
 121static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *);
 122static void netiucv_callback_connack(struct iucv_path *, u8 *);
 123static void netiucv_callback_connrej(struct iucv_path *, u8 *);
 124static void netiucv_callback_connsusp(struct iucv_path *, u8 *);
 125static void netiucv_callback_connres(struct iucv_path *, u8 *);
 126static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
 127static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
 128
 129static struct iucv_handler netiucv_handler = {
 130	.path_pending	  = netiucv_callback_connreq,
 131	.path_complete	  = netiucv_callback_connack,
 132	.path_severed	  = netiucv_callback_connrej,
 133	.path_quiesced	  = netiucv_callback_connsusp,
 134	.path_resumed	  = netiucv_callback_connres,
 135	.message_pending  = netiucv_callback_rx,
 136	.message_complete = netiucv_callback_txdone
 137};
 138
 139/**
 140 * Per connection profiling data
 141 */
 142struct connection_profile {
 143	unsigned long maxmulti;
 144	unsigned long maxcqueue;
 145	unsigned long doios_single;
 146	unsigned long doios_multi;
 147	unsigned long txlen;
 148	unsigned long tx_time;
 149	unsigned long send_stamp;
 150	unsigned long tx_pending;
 151	unsigned long tx_max_pending;
 152};
 153
 154/**
 155 * Representation of one iucv connection
 156 */
 157struct iucv_connection {
 158	struct list_head	  list;
 159	struct iucv_path	  *path;
 160	struct sk_buff            *rx_buff;
 161	struct sk_buff            *tx_buff;
 162	struct sk_buff_head       collect_queue;
 163	struct sk_buff_head	  commit_queue;
 164	spinlock_t                collect_lock;
 165	int                       collect_len;
 166	int                       max_buffsize;
 167	fsm_timer                 timer;
 168	fsm_instance              *fsm;
 169	struct net_device         *netdev;
 170	struct connection_profile prof;
 171	char                      userid[9];
 172	char			  userdata[17];
 173};
 174
 175/**
 176 * Linked list of all connection structs.
 177 */
 178static LIST_HEAD(iucv_connection_list);
 179static DEFINE_RWLOCK(iucv_connection_rwlock);
 180
 181/**
 182 * Representation of event-data for the
 183 * connection state machine.
 184 */
 185struct iucv_event {
 186	struct iucv_connection *conn;
 187	void                   *data;
 188};
 189
 190/**
 191 * Private part of the network device structure
 192 */
 193struct netiucv_priv {
 194	struct net_device_stats stats;
 195	unsigned long           tbusy;
 196	fsm_instance            *fsm;
 197	struct iucv_connection  *conn;
 198	struct device           *dev;
 199};
 200
 201/**
 202 * Link level header for a packet.
 203 */
 204struct ll_header {
 205	u16 next;
 206};
 207
 208#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
 209#define NETIUCV_BUFSIZE_MAX	 65537
 210#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
 211#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 212#define NETIUCV_MTU_DEFAULT      9216
 213#define NETIUCV_QUEUELEN_DEFAULT 50
 214#define NETIUCV_TIMEOUT_5SEC     5000
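
/*
 * Illustrative sketch, not part of the driver source: several packets can
 * be packed into a single IUCV message buffer.  Each packet is preceded
 * by a struct ll_header whose "next" field holds the offset, counted from
 * the start of the buffer, of the following header; a header with
 * next == 0 terminates the chain.  This is the format produced by
 * conn_action_txdone() and consumed by netiucv_unpack_skb() below.
 */
static int example_count_packets(const u8 *buf, size_t buflen)
{
	size_t offset = 0;
	int count = 0;

	while (offset + NETIUCV_HDRLEN <= buflen) {
		const struct ll_header *hdr =
			(const struct ll_header *)(buf + offset);

		if (!hdr->next)			/* terminating header */
			break;
		if (hdr->next <= offset)	/* malformed chain, stop */
			break;
		count++;
		offset = hdr->next;		/* jump to the next header */
	}
	return count;
}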
 215
 216/**
 217 * Compatibility macros for busy handling
 218 * of network devices.
 219 */
 220static void netiucv_clear_busy(struct net_device *dev)
 221{
 222	struct netiucv_priv *priv = netdev_priv(dev);
 223	clear_bit(0, &priv->tbusy);
 224	netif_wake_queue(dev);
 225}
 226
 227static int netiucv_test_and_set_busy(struct net_device *dev)
 228{
 229	struct netiucv_priv *priv = netdev_priv(dev);
 230	netif_stop_queue(dev);
 231	return test_and_set_bit(0, &priv->tbusy);
 232}
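
/*
 * Illustrative sketch, not part of the driver source: the transmit path
 * brackets the actual send with these helpers, exactly as netiucv_tx()
 * does further down in this file.
 */
static int example_busy_guard(struct net_device *dev)
{
	if (netiucv_test_and_set_busy(dev))
		return NETDEV_TX_BUSY;	/* another transmit is in flight */
	/* ... hand the packet to netiucv_transmit_skb() here ... */
	netiucv_clear_busy(dev);
	return NETDEV_TX_OK;
}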
 233
 234static u8 iucvMagic_ascii[16] = {
 235	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
 236	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
 237};
 238
 239static u8 iucvMagic_ebcdic[16] = {
 240	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
 241	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 242};
 243
 244/**
 245 * Convert an iucv userId to its printable
 246 * form (strip whitespace at end).
 247 *
  248 * @param name An iucv userId of length len
 249 *
 250 * @returns The printable string (static data!!)
 251 */
 252static char *netiucv_printname(char *name, int len)
 253{
 254	static char tmp[17];
 255	char *p = tmp;
 256	memcpy(tmp, name, len);
 257	tmp[len] = '\0';
 258	while (*p && ((p - tmp) < len) && (!isspace(*p)))
 259		p++;
 260	*p = '\0';
 261	return tmp;
 262}
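
/*
 * Illustrative example, not part of the driver source: blank padding is
 * stripped, so netiucv_printname("USER1   ", 8) yields "USER1".  The
 * result lives in a static buffer, so it has to be consumed before the
 * next call (the helper is not reentrant).
 */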
 263
 264static char *netiucv_printuser(struct iucv_connection *conn)
 265{
 266	static char tmp_uid[9];
 267	static char tmp_udat[17];
 268	static char buf[100];
 269
 270	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
 271		tmp_uid[8] = '\0';
 272		tmp_udat[16] = '\0';
 273		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
 274		memcpy(tmp_udat, conn->userdata, 16);
 275		EBCASC(tmp_udat, 16);
 276		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
 277		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
 278		return buf;
 279	} else
 280		return netiucv_printname(conn->userid, 8);
 281}
 282
 283/**
 284 * States of the interface statemachine.
 285 */
 286enum dev_states {
 287	DEV_STATE_STOPPED,
 288	DEV_STATE_STARTWAIT,
 289	DEV_STATE_STOPWAIT,
 290	DEV_STATE_RUNNING,
 291	/**
  292	 * MUST always be the last element!!
 293	 */
 294	NR_DEV_STATES
 295};
 296
 297static const char *dev_state_names[] = {
 298	"Stopped",
 299	"StartWait",
 300	"StopWait",
 301	"Running",
 302};
 303
 304/**
 305 * Events of the interface statemachine.
 306 */
 307enum dev_events {
 308	DEV_EVENT_START,
 309	DEV_EVENT_STOP,
 310	DEV_EVENT_CONUP,
 311	DEV_EVENT_CONDOWN,
 312	/**
  313	 * MUST always be the last element!!
 314	 */
 315	NR_DEV_EVENTS
 316};
 317
 318static const char *dev_event_names[] = {
 319	"Start",
 320	"Stop",
 321	"Connection up",
 322	"Connection down",
 323};
 324
 325/**
 326 * Events of the connection statemachine
 327 */
 328enum conn_events {
 329	/**
  330	 * Events representing callbacks from the
  331	 * lowlevel iucv layer
 332	 */
 333	CONN_EVENT_CONN_REQ,
 334	CONN_EVENT_CONN_ACK,
 335	CONN_EVENT_CONN_REJ,
 336	CONN_EVENT_CONN_SUS,
 337	CONN_EVENT_CONN_RES,
 338	CONN_EVENT_RX,
 339	CONN_EVENT_TXDONE,
 340
 341	/**
  342	 * Events representing error return codes from
  343	 * calls to the lowlevel iucv layer
 344	 */
 345
 346	/**
 347	 * Event, representing timer expiry.
 348	 */
 349	CONN_EVENT_TIMER,
 350
 351	/**
 352	 * Events, representing commands from upper levels.
 353	 */
 354	CONN_EVENT_START,
 355	CONN_EVENT_STOP,
 356
 357	/**
  358	 * MUST always be the last element!!
 359	 */
 360	NR_CONN_EVENTS,
 361};
 362
 363static const char *conn_event_names[] = {
 364	"Remote connection request",
 365	"Remote connection acknowledge",
 366	"Remote connection reject",
 367	"Connection suspended",
 368	"Connection resumed",
 369	"Data received",
 370	"Data sent",
 371
 372	"Timer",
 373
 374	"Start",
 375	"Stop",
 376};
 377
 378/**
 379 * States of the connection statemachine.
 380 */
 381enum conn_states {
 382	/**
 383	 * Connection not assigned to any device,
 384	 * initial state, invalid
 385	 */
 386	CONN_STATE_INVALID,
 387
 388	/**
 389	 * Userid assigned but not operating
 390	 */
 391	CONN_STATE_STOPPED,
 392
 393	/**
 394	 * Connection registered,
 395	 * no connection request sent yet,
 396	 * no connection request received
 397	 */
 398	CONN_STATE_STARTWAIT,
 399
 400	/**
 401	 * Connection registered and connection request sent,
 402	 * no acknowledge and no connection request received yet.
 403	 */
 404	CONN_STATE_SETUPWAIT,
 405
 406	/**
 407	 * Connection up and running idle
 408	 */
 409	CONN_STATE_IDLE,
 410
 411	/**
 412	 * Data sent, awaiting CONN_EVENT_TXDONE
 413	 */
 414	CONN_STATE_TX,
 415
 416	/**
 417	 * Error during registration.
 418	 */
 419	CONN_STATE_REGERR,
 420
 421	/**
  422	 * Error during connection setup.
 423	 */
 424	CONN_STATE_CONNERR,
 425
 426	/**
  427	 * MUST always be the last element!!
 428	 */
 429	NR_CONN_STATES,
 430};
 431
 432static const char *conn_state_names[] = {
 433	"Invalid",
 434	"Stopped",
 435	"StartWait",
 436	"SetupWait",
 437	"Idle",
 438	"TX",
 440	"Registration error",
 441	"Connect error",
 442};
 443
 444
 445/**
 446 * Debug Facility Stuff
 447 */
 448static debug_info_t *iucv_dbf_setup = NULL;
 449static debug_info_t *iucv_dbf_data = NULL;
 450static debug_info_t *iucv_dbf_trace = NULL;
 451
 452DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
 453
 454static void iucv_unregister_dbf_views(void)
 455{
 456	debug_unregister(iucv_dbf_setup);
 457	debug_unregister(iucv_dbf_data);
 458	debug_unregister(iucv_dbf_trace);
 459}
 460static int iucv_register_dbf_views(void)
 461{
 462	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
 463					IUCV_DBF_SETUP_PAGES,
 464					IUCV_DBF_SETUP_NR_AREAS,
 465					IUCV_DBF_SETUP_LEN);
 466	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
 467				       IUCV_DBF_DATA_PAGES,
 468				       IUCV_DBF_DATA_NR_AREAS,
 469				       IUCV_DBF_DATA_LEN);
 470	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
 471					IUCV_DBF_TRACE_PAGES,
 472					IUCV_DBF_TRACE_NR_AREAS,
 473					IUCV_DBF_TRACE_LEN);
 474
 475	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
 476	    (iucv_dbf_trace == NULL)) {
 477		iucv_unregister_dbf_views();
 478		return -ENOMEM;
 479	}
 480	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
 481	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
 482
 483	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
 484	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
 485
 486	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
 487	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
 488
 489	return 0;
 490}
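
/*
 * Usage note (an assumption based on the generic s390 debug facility, not
 * on anything in this file): with debugfs mounted, the three areas
 * registered above typically appear under /sys/kernel/debug/s390dbf/ as
 * iucv_setup, iucv_data and iucv_trace, each with a "hex_ascii" view and
 * a writable "level" file for raising the debug level at run time.
 */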
 491
 492/*
 493 * Callback-wrappers, called from lowlevel iucv layer.
 494 */
 495
 496static void netiucv_callback_rx(struct iucv_path *path,
 497				struct iucv_message *msg)
 498{
 499	struct iucv_connection *conn = path->private;
 500	struct iucv_event ev;
 501
 502	ev.conn = conn;
 503	ev.data = msg;
 504	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
 505}
 506
 507static void netiucv_callback_txdone(struct iucv_path *path,
 508				    struct iucv_message *msg)
 509{
 510	struct iucv_connection *conn = path->private;
 511	struct iucv_event ev;
 512
 513	ev.conn = conn;
 514	ev.data = msg;
 515	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
 516}
 517
 518static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 519{
 520	struct iucv_connection *conn = path->private;
 521
 522	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
 523}
 524
 525static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
 526				    u8 *ipuser)
 527{
 528	struct iucv_connection *conn = path->private;
 529	struct iucv_event ev;
 530	static char tmp_user[9];
 531	static char tmp_udat[17];
 532	int rc;
 533
 534	rc = -EINVAL;
 535	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
 536	memcpy(tmp_udat, ipuser, 16);
 537	EBCASC(tmp_udat, 16);
 538	read_lock_bh(&iucv_connection_rwlock);
 539	list_for_each_entry(conn, &iucv_connection_list, list) {
 540		if (strncmp(ipvmid, conn->userid, 8) ||
 541		    strncmp(ipuser, conn->userdata, 16))
 542			continue;
 543		/* Found a matching connection for this path. */
 544		conn->path = path;
 545		ev.conn = conn;
 546		ev.data = path;
 547		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
 548		rc = 0;
 549	}
 550	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
 551		       tmp_user, netiucv_printname(tmp_udat, 16));
 552	read_unlock_bh(&iucv_connection_rwlock);
 553	return rc;
 554}
 555
 556static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
 557{
 558	struct iucv_connection *conn = path->private;
 559
 560	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
 561}
 562
 563static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
 564{
 565	struct iucv_connection *conn = path->private;
 566
 567	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
 568}
 569
 570static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
 571{
 572	struct iucv_connection *conn = path->private;
 573
 574	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
 575}
 576
 577/**
 578 * NOP action for statemachines
 579 */
 580static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
 581{
 582}
 583
 584/*
 585 * Actions of the connection statemachine
 586 */
 587
 588/**
 589 * netiucv_unpack_skb
 590 * @conn: The connection where this skb has been received.
 591 * @pskb: The received skb.
 592 *
 593 * Unpack a just received skb and hand it over to upper layers.
 594 * Helper function for conn_action_rx.
 595 */
 596static void netiucv_unpack_skb(struct iucv_connection *conn,
 597			       struct sk_buff *pskb)
 598{
 599	struct net_device     *dev = conn->netdev;
 600	struct netiucv_priv   *privptr = netdev_priv(dev);
 601	u16 offset = 0;
 602
 603	skb_put(pskb, NETIUCV_HDRLEN);
 604	pskb->dev = dev;
 605	pskb->ip_summed = CHECKSUM_NONE;
 606	pskb->protocol = cpu_to_be16(ETH_P_IP);
 607
 608	while (1) {
 609		struct sk_buff *skb;
 610		struct ll_header *header = (struct ll_header *) pskb->data;
 611
 612		if (!header->next)
 613			break;
 614
 615		skb_pull(pskb, NETIUCV_HDRLEN);
 616		header->next -= offset;
 617		offset += header->next;
 618		header->next -= NETIUCV_HDRLEN;
 619		if (skb_tailroom(pskb) < header->next) {
 620			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
 621				header->next, skb_tailroom(pskb));
 622			return;
 623		}
 624		skb_put(pskb, header->next);
 625		skb_reset_mac_header(pskb);
 626		skb = dev_alloc_skb(pskb->len);
 627		if (!skb) {
 628			IUCV_DBF_TEXT(data, 2,
 629				"Out of memory in netiucv_unpack_skb\n");
 630			privptr->stats.rx_dropped++;
 631			return;
 632		}
 633		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
 634					  pskb->len);
 635		skb_reset_mac_header(skb);
 636		skb->dev = pskb->dev;
 637		skb->protocol = pskb->protocol;
 638		pskb->ip_summed = CHECKSUM_UNNECESSARY;
 639		privptr->stats.rx_packets++;
 640		privptr->stats.rx_bytes += skb->len;
 641		/*
 642		 * Since receiving is always initiated from a tasklet (in iucv.c),
 643		 * we must use netif_rx_ni() instead of netif_rx()
 644		 */
 645		netif_rx_ni(skb);
 646		skb_pull(pskb, header->next);
 647		skb_put(pskb, NETIUCV_HDRLEN);
 648	}
 649}
 650
 651static void conn_action_rx(fsm_instance *fi, int event, void *arg)
 652{
 653	struct iucv_event *ev = arg;
 654	struct iucv_connection *conn = ev->conn;
 655	struct iucv_message *msg = ev->data;
 656	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
 657	int rc;
 658
 659	IUCV_DBF_TEXT(trace, 4, __func__);
 660
 661	if (!conn->netdev) {
 662		iucv_message_reject(conn->path, msg);
 663		IUCV_DBF_TEXT(data, 2,
 664			      "Received data for unlinked connection\n");
 665		return;
 666	}
 667	if (msg->length > conn->max_buffsize) {
 668		iucv_message_reject(conn->path, msg);
 669		privptr->stats.rx_dropped++;
 670		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
 671			       msg->length, conn->max_buffsize);
 672		return;
 673	}
 674	conn->rx_buff->data = conn->rx_buff->head;
 675	skb_reset_tail_pointer(conn->rx_buff);
 676	conn->rx_buff->len = 0;
 677	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
 678				  msg->length, NULL);
 679	if (rc || msg->length < 5) {
 680		privptr->stats.rx_errors++;
 681		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
 682		return;
 683	}
 684	netiucv_unpack_skb(conn, conn->rx_buff);
 685}
 686
 687static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 688{
 689	struct iucv_event *ev = arg;
 690	struct iucv_connection *conn = ev->conn;
 691	struct iucv_message *msg = ev->data;
 692	struct iucv_message txmsg;
 693	struct netiucv_priv *privptr = NULL;
 694	u32 single_flag = msg->tag;
 695	u32 txbytes = 0;
 696	u32 txpackets = 0;
 697	u32 stat_maxcq = 0;
 698	struct sk_buff *skb;
 699	unsigned long saveflags;
 700	struct ll_header header;
 701	int rc;
 702
 703	IUCV_DBF_TEXT(trace, 4, __func__);
 704
 705	if (!conn || !conn->netdev) {
 706		IUCV_DBF_TEXT(data, 2,
 707			      "Send confirmation for unlinked connection\n");
 708		return;
 709	}
 710	privptr = netdev_priv(conn->netdev);
 711	conn->prof.tx_pending--;
 712	if (single_flag) {
 713		if ((skb = skb_dequeue(&conn->commit_queue))) {
 714			refcount_dec(&skb->users);
 715			if (privptr) {
 716				privptr->stats.tx_packets++;
 717				privptr->stats.tx_bytes +=
 718					(skb->len - NETIUCV_HDRLEN
 719						  - NETIUCV_HDRLEN);
 720			}
 721			dev_kfree_skb_any(skb);
 722		}
 723	}
 724	conn->tx_buff->data = conn->tx_buff->head;
 725	skb_reset_tail_pointer(conn->tx_buff);
 726	conn->tx_buff->len = 0;
 727	spin_lock_irqsave(&conn->collect_lock, saveflags);
 728	while ((skb = skb_dequeue(&conn->collect_queue))) {
 729		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
 730		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
 731		skb_copy_from_linear_data(skb,
 732					  skb_put(conn->tx_buff, skb->len),
 733					  skb->len);
 734		txbytes += skb->len;
 735		txpackets++;
 736		stat_maxcq++;
 737		refcount_dec(&skb->users);
 738		dev_kfree_skb_any(skb);
 739	}
 740	if (conn->collect_len > conn->prof.maxmulti)
 741		conn->prof.maxmulti = conn->collect_len;
 742	conn->collect_len = 0;
 743	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
 744	if (conn->tx_buff->len == 0) {
 745		fsm_newstate(fi, CONN_STATE_IDLE);
 746		return;
 747	}
 748
 749	header.next = 0;
 750	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
 751	conn->prof.send_stamp = jiffies;
 752	txmsg.class = 0;
 753	txmsg.tag = 0;
 754	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
 755			       conn->tx_buff->data, conn->tx_buff->len);
 756	conn->prof.doios_multi++;
 757	conn->prof.txlen += conn->tx_buff->len;
 758	conn->prof.tx_pending++;
 759	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
 760		conn->prof.tx_max_pending = conn->prof.tx_pending;
 761	if (rc) {
 762		conn->prof.tx_pending--;
 763		fsm_newstate(fi, CONN_STATE_IDLE);
 764		if (privptr)
 765			privptr->stats.tx_errors += txpackets;
 766		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
 767	} else {
 768		if (privptr) {
 769			privptr->stats.tx_packets += txpackets;
 770			privptr->stats.tx_bytes += txbytes;
 771		}
 772		if (stat_maxcq > conn->prof.maxcqueue)
 773			conn->prof.maxcqueue = stat_maxcq;
 774	}
 775}
 776
 777static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
 778{
 779	struct iucv_event *ev = arg;
 780	struct iucv_connection *conn = ev->conn;
 781	struct iucv_path *path = ev->data;
 782	struct net_device *netdev = conn->netdev;
 783	struct netiucv_priv *privptr = netdev_priv(netdev);
 784	int rc;
 785
 786	IUCV_DBF_TEXT(trace, 3, __func__);
 787
 788	conn->path = path;
 789	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
 790	path->flags = 0;
 791	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
 792	if (rc) {
 793		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 794		return;
 795	}
 796	fsm_newstate(fi, CONN_STATE_IDLE);
 797	netdev->tx_queue_len = conn->path->msglim;
 798	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 799}
 800
 801static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
 802{
 803	struct iucv_event *ev = arg;
 804	struct iucv_path *path = ev->data;
 805
 806	IUCV_DBF_TEXT(trace, 3, __func__);
 807	iucv_path_sever(path, NULL);
 808}
 809
 810static void conn_action_connack(fsm_instance *fi, int event, void *arg)
 811{
 812	struct iucv_connection *conn = arg;
 813	struct net_device *netdev = conn->netdev;
 814	struct netiucv_priv *privptr = netdev_priv(netdev);
 815
 816	IUCV_DBF_TEXT(trace, 3, __func__);
 817	fsm_deltimer(&conn->timer);
 818	fsm_newstate(fi, CONN_STATE_IDLE);
 819	netdev->tx_queue_len = conn->path->msglim;
 820	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 821}
 822
 823static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
 824{
 825	struct iucv_connection *conn = arg;
 826
 827	IUCV_DBF_TEXT(trace, 3, __func__);
 828	fsm_deltimer(&conn->timer);
 829	iucv_path_sever(conn->path, conn->userdata);
 830	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 831}
 832
 833static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
 834{
 835	struct iucv_connection *conn = arg;
 836	struct net_device *netdev = conn->netdev;
 837	struct netiucv_priv *privptr = netdev_priv(netdev);
 838
 839	IUCV_DBF_TEXT(trace, 3, __func__);
 840
 841	fsm_deltimer(&conn->timer);
 842	iucv_path_sever(conn->path, conn->userdata);
 843	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
 844			       "connection\n", netiucv_printuser(conn));
 845	IUCV_DBF_TEXT(data, 2,
 846		      "conn_action_connsever: Remote dropped connection\n");
 847	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 848	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 849}
 850
 851static void conn_action_start(fsm_instance *fi, int event, void *arg)
 852{
 853	struct iucv_connection *conn = arg;
 854	struct net_device *netdev = conn->netdev;
 855	struct netiucv_priv *privptr = netdev_priv(netdev);
 856	int rc;
 857
 858	IUCV_DBF_TEXT(trace, 3, __func__);
 859
 860	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 861
 862	/*
 863	 * We must set the state before calling iucv_connect because the
 864	 * callback handler could be called at any point after the connection
 865	 * request is sent
 866	 */
 867
 868	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 869	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
 870	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
 871		netdev->name, netiucv_printuser(conn));
 872
 873	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
 874			       NULL, conn->userdata, conn);
 875	switch (rc) {
 876	case 0:
 877		netdev->tx_queue_len = conn->path->msglim;
 878		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
 879			     CONN_EVENT_TIMER, conn);
 880		return;
 881	case 11:
 882		dev_warn(privptr->dev,
 883			"The IUCV device failed to connect to z/VM guest %s\n",
 884			netiucv_printname(conn->userid, 8));
 885		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 886		break;
 887	case 12:
 888		dev_warn(privptr->dev,
 889			"The IUCV device failed to connect to the peer on z/VM"
 890			" guest %s\n", netiucv_printname(conn->userid, 8));
 891		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 892		break;
 893	case 13:
 894		dev_err(privptr->dev,
 895			"Connecting the IUCV device would exceed the maximum"
 896			" number of IUCV connections\n");
 897		fsm_newstate(fi, CONN_STATE_CONNERR);
 898		break;
 899	case 14:
 900		dev_err(privptr->dev,
 901			"z/VM guest %s has too many IUCV connections"
 902			" to connect with the IUCV device\n",
 903			netiucv_printname(conn->userid, 8));
 904		fsm_newstate(fi, CONN_STATE_CONNERR);
 905		break;
 906	case 15:
 907		dev_err(privptr->dev,
 908			"The IUCV device cannot connect to a z/VM guest with no"
 909			" IUCV authorization\n");
 910		fsm_newstate(fi, CONN_STATE_CONNERR);
 911		break;
 912	default:
 913		dev_err(privptr->dev,
 914			"Connecting the IUCV device failed with error %d\n",
 915			rc);
 916		fsm_newstate(fi, CONN_STATE_CONNERR);
 917		break;
 918	}
 919	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
 920	kfree(conn->path);
 921	conn->path = NULL;
 922}
 923
 924static void netiucv_purge_skb_queue(struct sk_buff_head *q)
 925{
 926	struct sk_buff *skb;
 927
 928	while ((skb = skb_dequeue(q))) {
 929		refcount_dec(&skb->users);
 930		dev_kfree_skb_any(skb);
 931	}
 932}
 933
 934static void conn_action_stop(fsm_instance *fi, int event, void *arg)
 935{
 936	struct iucv_event *ev = arg;
 937	struct iucv_connection *conn = ev->conn;
 938	struct net_device *netdev = conn->netdev;
 939	struct netiucv_priv *privptr = netdev_priv(netdev);
 940
 941	IUCV_DBF_TEXT(trace, 3, __func__);
 942
 943	fsm_deltimer(&conn->timer);
 944	fsm_newstate(fi, CONN_STATE_STOPPED);
 945	netiucv_purge_skb_queue(&conn->collect_queue);
 946	if (conn->path) {
 947		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
 948		iucv_path_sever(conn->path, conn->userdata);
 949		kfree(conn->path);
 950		conn->path = NULL;
 951	}
 952	netiucv_purge_skb_queue(&conn->commit_queue);
 953	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 954}
 955
 956static void conn_action_inval(fsm_instance *fi, int event, void *arg)
 957{
 958	struct iucv_connection *conn = arg;
 959	struct net_device *netdev = conn->netdev;
 960
 961	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
 962		netdev->name, conn->userid);
 963}
 964
 965static const fsm_node conn_fsm[] = {
 966	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
 967	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
 968
 969	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
 970	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
 971	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
 972	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
 973	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
 974	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
 975	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
 976
 977	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
  978	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
 979	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
 980	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
 981	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
 982
 983	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
 984	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
 985
 986	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
 987	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
 988	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
 989
 990	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
 991	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
 992
 993	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
 994	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
 995};
 996
 997static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
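
/*
 * Illustrative sketch, not part of the driver source: the connection FSM
 * built from conn_fsm[] is driven by posting events; the current state
 * and the event together select the action routine from the table.  This
 * mirrors what dev_action_stop() below does.
 */
static void example_stop_connection(struct iucv_connection *conn)
{
	struct iucv_event ev = { .conn = conn, .data = NULL };

	/* invokes conn_action_stop() for any state that has a
	 * CONN_EVENT_STOP entry in conn_fsm[] above */
	fsm_event(conn->fsm, CONN_EVENT_STOP, &ev);
}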
 998
 999
1000/*
1001 * Actions for interface - statemachine.
1002 */
1003
1004/**
1005 * dev_action_start
1006 * @fi: An instance of an interface statemachine.
1007 * @event: The event, just happened.
1008 * @arg: Generic pointer, casted from struct net_device * upon call.
1009 *
1010 * Startup connection by sending CONN_EVENT_START to it.
1011 */
1012static void dev_action_start(fsm_instance *fi, int event, void *arg)
1013{
1014	struct net_device   *dev = arg;
1015	struct netiucv_priv *privptr = netdev_priv(dev);
1016
1017	IUCV_DBF_TEXT(trace, 3, __func__);
1018
1019	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1020	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1021}
1022
1023/**
1024 * Shutdown connection by sending CONN_EVENT_STOP to it.
1025 *
1026 * @param fi    An instance of an interface statemachine.
1027 * @param event The event, just happened.
1028 * @param arg   Generic pointer, casted from struct net_device * upon call.
1029 */
1030static void
1031dev_action_stop(fsm_instance *fi, int event, void *arg)
1032{
1033	struct net_device   *dev = arg;
1034	struct netiucv_priv *privptr = netdev_priv(dev);
1035	struct iucv_event   ev;
1036
1037	IUCV_DBF_TEXT(trace, 3, __func__);
1038
1039	ev.conn = privptr->conn;
1040
1041	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1042	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1043}
1044
1045/**
1046 * Called from connection statemachine
1047 * when a connection is up and running.
1048 *
1049 * @param fi    An instance of an interface statemachine.
1050 * @param event The event, just happened.
1051 * @param arg   Generic pointer, casted from struct net_device * upon call.
1052 */
1053static void
1054dev_action_connup(fsm_instance *fi, int event, void *arg)
1055{
1056	struct net_device   *dev = arg;
1057	struct netiucv_priv *privptr = netdev_priv(dev);
1058
1059	IUCV_DBF_TEXT(trace, 3, __func__);
1060
1061	switch (fsm_getstate(fi)) {
1062		case DEV_STATE_STARTWAIT:
1063			fsm_newstate(fi, DEV_STATE_RUNNING);
1064			dev_info(privptr->dev,
1065				"The IUCV device has been connected"
1066				" successfully to %s\n",
1067				netiucv_printuser(privptr->conn));
1068			IUCV_DBF_TEXT(setup, 3,
1069				"connection is up and running\n");
1070			break;
1071		case DEV_STATE_STOPWAIT:
1072			IUCV_DBF_TEXT(data, 2,
1073				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1074			break;
1075	}
1076}
1077
1078/**
1079 * Called from connection statemachine
1080 * when a connection has been shutdown.
1081 *
1082 * @param fi    An instance of an interface statemachine.
1083 * @param event The event, just happened.
1084 * @param arg   Generic pointer, casted from struct net_device * upon call.
1085 */
1086static void
1087dev_action_conndown(fsm_instance *fi, int event, void *arg)
1088{
1089	IUCV_DBF_TEXT(trace, 3, __func__);
1090
1091	switch (fsm_getstate(fi)) {
1092		case DEV_STATE_RUNNING:
1093			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1094			break;
1095		case DEV_STATE_STOPWAIT:
1096			fsm_newstate(fi, DEV_STATE_STOPPED);
1097			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1098			break;
1099	}
1100}
1101
1102static const fsm_node dev_fsm[] = {
1103	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1104
1105	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1106	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1107
1108	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1109	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1110
1111	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1112	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1113	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1114};
1115
1116static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1117
1118/**
1119 * Transmit a packet.
1120 * This is a helper function for netiucv_tx().
1121 *
1122 * @param conn Connection to be used for sending.
1123 * @param skb Pointer to struct sk_buff of packet to send.
1124 *            The linklevel header has already been set up
1125 *            by netiucv_tx().
1126 *
1127 * @return 0 on success, -ERRNO on failure.
1128 */
1129static int netiucv_transmit_skb(struct iucv_connection *conn,
1130				struct sk_buff *skb)
1131{
1132	struct iucv_message msg;
1133	unsigned long saveflags;
1134	struct ll_header header;
1135	int rc;
1136
1137	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1138		int l = skb->len + NETIUCV_HDRLEN;
1139
1140		spin_lock_irqsave(&conn->collect_lock, saveflags);
1141		if (conn->collect_len + l >
1142		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1143			rc = -EBUSY;
1144			IUCV_DBF_TEXT(data, 2,
1145				      "EBUSY from netiucv_transmit_skb\n");
1146		} else {
1147			refcount_inc(&skb->users);
1148			skb_queue_tail(&conn->collect_queue, skb);
1149			conn->collect_len += l;
1150			rc = 0;
1151		}
1152		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1153	} else {
1154		struct sk_buff *nskb = skb;
1155		/**
1156		 * Copy the skb to a new allocated skb in lowmem only if the
1157		 * data is located above 2G in memory or tailroom is < 2.
1158		 */
1159		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1160				    NETIUCV_HDRLEN)) >> 31;
1161		int copied = 0;
1162		if (hi || (skb_tailroom(skb) < 2)) {
1163			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1164					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1165			if (!nskb) {
1166				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1167				rc = -ENOMEM;
1168				return rc;
1169			} else {
1170				skb_reserve(nskb, NETIUCV_HDRLEN);
1171				skb_put_data(nskb, skb->data, skb->len);
1172			}
1173			copied = 1;
1174		}
1175		/**
1176		 * skb now is below 2G and has enough room. Add headers.
1177		 */
1178		header.next = nskb->len + NETIUCV_HDRLEN;
1179		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1180		header.next = 0;
1181		skb_put_data(nskb, &header, NETIUCV_HDRLEN);
1182
1183		fsm_newstate(conn->fsm, CONN_STATE_TX);
1184		conn->prof.send_stamp = jiffies;
1185
1186		msg.tag = 1;
1187		msg.class = 0;
1188		rc = iucv_message_send(conn->path, &msg, 0, 0,
1189				       nskb->data, nskb->len);
1190		conn->prof.doios_single++;
1191		conn->prof.txlen += skb->len;
1192		conn->prof.tx_pending++;
1193		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1194			conn->prof.tx_max_pending = conn->prof.tx_pending;
1195		if (rc) {
1196			struct netiucv_priv *privptr;
1197			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1198			conn->prof.tx_pending--;
1199			privptr = netdev_priv(conn->netdev);
1200			if (privptr)
1201				privptr->stats.tx_errors++;
1202			if (copied)
1203				dev_kfree_skb(nskb);
1204			else {
1205				/**
1206				 * Remove our headers. They get added
1207				 * again on retransmit.
1208				 */
1209				skb_pull(skb, NETIUCV_HDRLEN);
1210				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1211			}
1212			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1213		} else {
1214			if (copied)
1215				dev_kfree_skb(skb);
1216			refcount_inc(&nskb->users);
1217			skb_queue_tail(&conn->commit_queue, nskb);
1218		}
1219	}
1220
1221	return rc;
1222}
1223
1224/*
1225 * Interface API for upper network layers
1226 */
1227
1228/**
1229 * Open an interface.
1230 * Called from generic network layer when ifconfig up is run.
1231 *
1232 * @param dev Pointer to interface struct.
1233 *
1234 * @return 0 on success, -ERRNO on failure. (Never fails.)
1235 */
1236static int netiucv_open(struct net_device *dev)
1237{
1238	struct netiucv_priv *priv = netdev_priv(dev);
1239
1240	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1241	return 0;
1242}
1243
1244/**
1245 * Close an interface.
1246 * Called from generic network layer when ifconfig down is run.
1247 *
1248 * @param dev Pointer to interface struct.
1249 *
1250 * @return 0 on success, -ERRNO on failure. (Never fails.)
1251 */
1252static int netiucv_close(struct net_device *dev)
1253{
1254	struct netiucv_priv *priv = netdev_priv(dev);
1255
1256	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1257	return 0;
1258}
1259
1260/**
1261 * Start transmission of a packet.
1262 * Called from generic network device layer.
1263 *
1264 * @param skb Pointer to buffer containing the packet.
1265 * @param dev Pointer to interface struct.
1266 *
1267 * @return 0 if packet consumed, !0 if packet rejected.
1268 *         Note: If we return !0 (NETDEV_TX_BUSY), the packet is
1269 *               retried later by the generic network layer.
1270 */
1271static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1272{
1273	struct netiucv_priv *privptr = netdev_priv(dev);
1274	int rc;
1275
1276	IUCV_DBF_TEXT(trace, 4, __func__);
1277	/**
1278	 * Some sanity checks ...
1279	 */
1280	if (skb == NULL) {
1281		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1282		privptr->stats.tx_dropped++;
1283		return NETDEV_TX_OK;
1284	}
1285	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1286		IUCV_DBF_TEXT(data, 2,
1287			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1288		dev_kfree_skb(skb);
1289		privptr->stats.tx_dropped++;
1290		return NETDEV_TX_OK;
1291	}
1292
1293	/**
1294	 * If connection is not running, try to restart it
1295	 * and throw away packet.
1296	 */
1297	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1298		dev_kfree_skb(skb);
1299		privptr->stats.tx_dropped++;
1300		privptr->stats.tx_errors++;
1301		privptr->stats.tx_carrier_errors++;
1302		return NETDEV_TX_OK;
1303	}
1304
1305	if (netiucv_test_and_set_busy(dev)) {
1306		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1307		return NETDEV_TX_BUSY;
1308	}
1309	netif_trans_update(dev);
1310	rc = netiucv_transmit_skb(privptr->conn, skb);
1311	netiucv_clear_busy(dev);
1312	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1313}
1314
1315/**
1316 * netiucv_stats
1317 * @dev: Pointer to interface struct.
1318 *
1319 * Returns interface statistics of a device.
1320 *
1321 * Returns pointer to stats struct of this interface.
1322 */
1323static struct net_device_stats *netiucv_stats (struct net_device * dev)
1324{
1325	struct netiucv_priv *priv = netdev_priv(dev);
1326
1327	IUCV_DBF_TEXT(trace, 5, __func__);
1328	return &priv->stats;
1329}
1330
1331/*
1332 * attributes in sysfs
1333 */
1334
1335static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1336			 char *buf)
1337{
1338	struct netiucv_priv *priv = dev_get_drvdata(dev);
1339
1340	IUCV_DBF_TEXT(trace, 5, __func__);
1341	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1342}
1343
1344static int netiucv_check_user(const char *buf, size_t count, char *username,
1345			      char *userdata)
1346{
1347	const char *p;
1348	int i;
1349
1350	p = strchr(buf, '.');
1351	if ((p && ((count > 26) ||
1352		   ((p - buf) > 8) ||
1353		   (buf + count - p > 18))) ||
1354	    (!p && (count > 9))) {
1355		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1356		return -EINVAL;
1357	}
1358
1359	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1360		if (isalnum(*p) || *p == '$') {
1361			username[i] = toupper(*p);
1362			continue;
1363		}
1364		if (*p == '\n')
1365			/* trailing lf, grr */
1366			break;
1367		IUCV_DBF_TEXT_(setup, 2,
1368			       "conn_write: invalid character %02x\n", *p);
1369		return -EINVAL;
1370	}
1371	while (i < 8)
1372		username[i++] = ' ';
1373	username[8] = '\0';
1374
1375	if (*p == '.') {
1376		p++;
1377		for (i = 0; i < 16 && *p; i++, p++) {
1378			if (*p == '\n')
1379				break;
1380			userdata[i] = toupper(*p);
1381		}
1382		while (i > 0 && i < 16)
1383			userdata[i++] = ' ';
1384	} else
1385		memcpy(userdata, iucvMagic_ascii, 16);
1386	userdata[16] = '\0';
1387	ASCEBC(userdata, 16);
1388
1389	return 0;
1390}
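
/*
 * Illustrative examples, not part of the driver source (the peer names
 * below are made up): strings accepted by netiucv_check_user() when
 * written to the "user" or "connection" attributes:
 *
 *	"vmlinux1\n"          ->  userid "VMLINUX1", default userdata
 *	"vmlinux1.lnxhipe\n"  ->  userid "VMLINUX1", userdata "LNXHIPE"
 *
 * Up to 8 alphanumeric (or '$') characters of z/VM user ID, optionally
 * followed by '.' and up to 16 characters of userdata; both parts are
 * upper-cased and blank padded, and the userdata is converted to EBCDIC.
 */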
1391
1392static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1393			  const char *buf, size_t count)
1394{
1395	struct netiucv_priv *priv = dev_get_drvdata(dev);
1396	struct net_device *ndev = priv->conn->netdev;
1397	char	username[9];
1398	char	userdata[17];
1399	int	rc;
1400	struct iucv_connection *cp;
1401
1402	IUCV_DBF_TEXT(trace, 3, __func__);
1403	rc = netiucv_check_user(buf, count, username, userdata);
1404	if (rc)
1405		return rc;
1406
1407	if (memcmp(username, priv->conn->userid, 9) &&
1408	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1409		/* username changed while the interface is active. */
1410		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1411		return -EPERM;
1412	}
1413	read_lock_bh(&iucv_connection_rwlock);
1414	list_for_each_entry(cp, &iucv_connection_list, list) {
1415		if (!strncmp(username, cp->userid, 9) &&
1416		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1417			read_unlock_bh(&iucv_connection_rwlock);
1418			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1419				"already exists\n", netiucv_printuser(cp));
1420			return -EEXIST;
1421		}
1422	}
1423	read_unlock_bh(&iucv_connection_rwlock);
1424	memcpy(priv->conn->userid, username, 9);
1425	memcpy(priv->conn->userdata, userdata, 17);
1426	return count;
1427}
1428
1429static DEVICE_ATTR(user, 0644, user_show, user_write);
1430
1431static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1432			    char *buf)
1433{
1434	struct netiucv_priv *priv = dev_get_drvdata(dev);
1435
1436	IUCV_DBF_TEXT(trace, 5, __func__);
1437	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1438}
1439
1440static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1441			     const char *buf, size_t count)
1442{
1443	struct netiucv_priv *priv = dev_get_drvdata(dev);
1444	struct net_device *ndev = priv->conn->netdev;
1445	unsigned int bs1;
1446	int rc;
1447
1448	IUCV_DBF_TEXT(trace, 3, __func__);
1449	if (count >= 39)
1450		return -EINVAL;
1451
1452	rc = kstrtouint(buf, 0, &bs1);
1453
1454	if (rc == -EINVAL) {
1455		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
1456			buf);
1457		return -EINVAL;
1458	}
1459	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
1460		IUCV_DBF_TEXT_(setup, 2,
1461			"buffer_write: buffer size %d too large\n",
1462			bs1);
1463		return -EINVAL;
1464	}
1465	if ((ndev->flags & IFF_RUNNING) &&
1466	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1467		IUCV_DBF_TEXT_(setup, 2,
1468			"buffer_write: buffer size %d too small\n",
1469			bs1);
1470		return -EINVAL;
1471	}
1472	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1473		IUCV_DBF_TEXT_(setup, 2,
1474			"buffer_write: buffer size %d too small\n",
1475			bs1);
1476		return -EINVAL;
1477	}
1478
1479	priv->conn->max_buffsize = bs1;
1480	if (!(ndev->flags & IFF_RUNNING))
1481		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1482
1483	return count;
1484
1485}
1486
1487static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1488
1489static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1490			     char *buf)
1491{
1492	struct netiucv_priv *priv = dev_get_drvdata(dev);
1493
1494	IUCV_DBF_TEXT(trace, 5, __func__);
1495	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1496}
1497
1498static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1499
1500static ssize_t conn_fsm_show (struct device *dev,
1501			      struct device_attribute *attr, char *buf)
1502{
1503	struct netiucv_priv *priv = dev_get_drvdata(dev);
1504
1505	IUCV_DBF_TEXT(trace, 5, __func__);
1506	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1507}
1508
1509static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1510
1511static ssize_t maxmulti_show (struct device *dev,
1512			      struct device_attribute *attr, char *buf)
1513{
1514	struct netiucv_priv *priv = dev_get_drvdata(dev);
1515
1516	IUCV_DBF_TEXT(trace, 5, __func__);
1517	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1518}
1519
1520static ssize_t maxmulti_write (struct device *dev,
1521			       struct device_attribute *attr,
1522			       const char *buf, size_t count)
1523{
1524	struct netiucv_priv *priv = dev_get_drvdata(dev);
1525
1526	IUCV_DBF_TEXT(trace, 4, __func__);
1527	priv->conn->prof.maxmulti = 0;
1528	return count;
1529}
1530
1531static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1532
1533static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1534			   char *buf)
1535{
1536	struct netiucv_priv *priv = dev_get_drvdata(dev);
1537
1538	IUCV_DBF_TEXT(trace, 5, __func__);
1539	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1540}
1541
1542static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1543			    const char *buf, size_t count)
1544{
1545	struct netiucv_priv *priv = dev_get_drvdata(dev);
1546
1547	IUCV_DBF_TEXT(trace, 4, __func__);
1548	priv->conn->prof.maxcqueue = 0;
1549	return count;
1550}
1551
1552static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1553
1554static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1555			   char *buf)
1556{
1557	struct netiucv_priv *priv = dev_get_drvdata(dev);
1558
1559	IUCV_DBF_TEXT(trace, 5, __func__);
1560	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1561}
1562
1563static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1564			    const char *buf, size_t count)
1565{
1566	struct netiucv_priv *priv = dev_get_drvdata(dev);
1567
1568	IUCV_DBF_TEXT(trace, 4, __func__);
1569	priv->conn->prof.doios_single = 0;
1570	return count;
1571}
1572
1573static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1574
1575static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1576			   char *buf)
1577{
1578	struct netiucv_priv *priv = dev_get_drvdata(dev);
1579
1580	IUCV_DBF_TEXT(trace, 5, __func__);
1581	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1582}
1583
1584static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1585			    const char *buf, size_t count)
1586{
1587	struct netiucv_priv *priv = dev_get_drvdata(dev);
1588
1589	IUCV_DBF_TEXT(trace, 5, __func__);
1590	priv->conn->prof.doios_multi = 0;
1591	return count;
1592}
1593
1594static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1595
1596static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1597			   char *buf)
1598{
1599	struct netiucv_priv *priv = dev_get_drvdata(dev);
1600
1601	IUCV_DBF_TEXT(trace, 5, __func__);
1602	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1603}
1604
1605static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1606			    const char *buf, size_t count)
1607{
1608	struct netiucv_priv *priv = dev_get_drvdata(dev);
1609
1610	IUCV_DBF_TEXT(trace, 4, __func__);
1611	priv->conn->prof.txlen = 0;
1612	return count;
1613}
1614
1615static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1616
1617static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1618			    char *buf)
1619{
1620	struct netiucv_priv *priv = dev_get_drvdata(dev);
1621
1622	IUCV_DBF_TEXT(trace, 5, __func__);
1623	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1624}
1625
1626static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1627			     const char *buf, size_t count)
1628{
1629	struct netiucv_priv *priv = dev_get_drvdata(dev);
1630
1631	IUCV_DBF_TEXT(trace, 4, __func__);
1632	priv->conn->prof.tx_time = 0;
1633	return count;
1634}
1635
1636static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1637
1638static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1639			    char *buf)
1640{
1641	struct netiucv_priv *priv = dev_get_drvdata(dev);
1642
1643	IUCV_DBF_TEXT(trace, 5, __func__);
1644	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1645}
1646
1647static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1648			     const char *buf, size_t count)
1649{
1650	struct netiucv_priv *priv = dev_get_drvdata(dev);
1651
1652	IUCV_DBF_TEXT(trace, 4, __func__);
1653	priv->conn->prof.tx_pending = 0;
1654	return count;
1655}
1656
1657static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1658
1659static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1660			    char *buf)
1661{
1662	struct netiucv_priv *priv = dev_get_drvdata(dev);
1663
1664	IUCV_DBF_TEXT(trace, 5, __func__);
1665	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1666}
1667
1668static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1669			     const char *buf, size_t count)
1670{
1671	struct netiucv_priv *priv = dev_get_drvdata(dev);
1672
1673	IUCV_DBF_TEXT(trace, 4, __func__);
1674	priv->conn->prof.tx_max_pending = 0;
1675	return count;
1676}
1677
1678static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1679
1680static struct attribute *netiucv_attrs[] = {
1681	&dev_attr_buffer.attr,
1682	&dev_attr_user.attr,
1683	NULL,
1684};
1685
1686static struct attribute_group netiucv_attr_group = {
1687	.attrs = netiucv_attrs,
1688};
1689
1690static struct attribute *netiucv_stat_attrs[] = {
1691	&dev_attr_device_fsm_state.attr,
1692	&dev_attr_connection_fsm_state.attr,
1693	&dev_attr_max_tx_buffer_used.attr,
1694	&dev_attr_max_chained_skbs.attr,
1695	&dev_attr_tx_single_write_ops.attr,
1696	&dev_attr_tx_multi_write_ops.attr,
1697	&dev_attr_netto_bytes.attr,
1698	&dev_attr_max_tx_io_time.attr,
1699	&dev_attr_tx_pending.attr,
1700	&dev_attr_tx_max_pending.attr,
1701	NULL,
1702};
1703
1704static struct attribute_group netiucv_stat_attr_group = {
1705	.name  = "stats",
1706	.attrs = netiucv_stat_attrs,
1707};
1708
1709static const struct attribute_group *netiucv_attr_groups[] = {
1710	&netiucv_stat_attr_group,
1711	&netiucv_attr_group,
1712	NULL,
1713};
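
/*
 * Usage note, not part of the driver source: every attribute in the
 * "stats" group is read/write; reading returns the current counter and
 * writing any value simply resets it to zero (the *_write handlers above
 * ignore the data that was written).
 */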
1714
1715static int netiucv_register_device(struct net_device *ndev)
1716{
1717	struct netiucv_priv *priv = netdev_priv(ndev);
1718	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1719	int ret;
1720
1721	IUCV_DBF_TEXT(trace, 3, __func__);
1722
1723	if (dev) {
1724		dev_set_name(dev, "net%s", ndev->name);
1725		dev->bus = &iucv_bus;
1726		dev->parent = iucv_root;
1727		dev->groups = netiucv_attr_groups;
1728		/*
1729		 * The release function could be called after the
1730		 * module has been unloaded. Its _only_ task is to
1731		 * free the struct. Therefore, we specify kfree()
1732		 * directly here. (Probably a little bit obfuscating
1733		 * but legitimate ...).
1734		 */
1735		dev->release = (void (*)(struct device *))kfree;
1736		dev->driver = &netiucv_driver;
1737	} else
1738		return -ENOMEM;
1739
1740	ret = device_register(dev);
1741	if (ret) {
1742		put_device(dev);
1743		return ret;
1744	}
1745	priv->dev = dev;
1746	dev_set_drvdata(dev, priv);
1747	return 0;
1748}
1749
1750static void netiucv_unregister_device(struct device *dev)
1751{
1752	IUCV_DBF_TEXT(trace, 3, __func__);
1753	device_unregister(dev);
1754}
1755
1756/**
1757 * Allocate and initialize a new connection structure.
1758 * Add it to the list of netiucv connections.
1759 */
1760static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1761						      char *username,
1762						      char *userdata)
1763{
1764	struct iucv_connection *conn;
1765
1766	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1767	if (!conn)
1768		goto out;
1769	skb_queue_head_init(&conn->collect_queue);
1770	skb_queue_head_init(&conn->commit_queue);
1771	spin_lock_init(&conn->collect_lock);
1772	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1773	conn->netdev = dev;
1774
1775	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1776	if (!conn->rx_buff)
1777		goto out_conn;
1778	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1779	if (!conn->tx_buff)
1780		goto out_rx;
1781	conn->fsm = init_fsm("netiucvconn", conn_state_names,
1782			     conn_event_names, NR_CONN_STATES,
1783			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1784			     GFP_KERNEL);
1785	if (!conn->fsm)
1786		goto out_tx;
1787
1788	fsm_settimer(conn->fsm, &conn->timer);
1789	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1790
1791	if (userdata)
1792		memcpy(conn->userdata, userdata, 17);
1793	if (username) {
1794		memcpy(conn->userid, username, 9);
1795		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1796	}
1797
1798	write_lock_bh(&iucv_connection_rwlock);
1799	list_add_tail(&conn->list, &iucv_connection_list);
1800	write_unlock_bh(&iucv_connection_rwlock);
1801	return conn;
1802
1803out_tx:
1804	kfree_skb(conn->tx_buff);
1805out_rx:
1806	kfree_skb(conn->rx_buff);
1807out_conn:
1808	kfree(conn);
1809out:
1810	return NULL;
1811}
1812
1813/**
1814 * Release a connection structure and remove it from the
1815 * list of netiucv connections.
1816 */
1817static void netiucv_remove_connection(struct iucv_connection *conn)
1818{
1819
1820	IUCV_DBF_TEXT(trace, 3, __func__);
1821	write_lock_bh(&iucv_connection_rwlock);
1822	list_del_init(&conn->list);
1823	write_unlock_bh(&iucv_connection_rwlock);
1824	fsm_deltimer(&conn->timer);
1825	netiucv_purge_skb_queue(&conn->collect_queue);
1826	if (conn->path) {
1827		iucv_path_sever(conn->path, conn->userdata);
1828		kfree(conn->path);
1829		conn->path = NULL;
1830	}
1831	netiucv_purge_skb_queue(&conn->commit_queue);
1832	kfree_fsm(conn->fsm);
1833	kfree_skb(conn->rx_buff);
1834	kfree_skb(conn->tx_buff);
1835}
1836
1837/**
1838 * Release everything of a net device.
1839 */
1840static void netiucv_free_netdevice(struct net_device *dev)
1841{
1842	struct netiucv_priv *privptr = netdev_priv(dev);
1843
1844	IUCV_DBF_TEXT(trace, 3, __func__);
1845
1846	if (!dev)
1847		return;
1848
1849	if (privptr) {
1850		if (privptr->conn)
1851			netiucv_remove_connection(privptr->conn);
1852		if (privptr->fsm)
1853			kfree_fsm(privptr->fsm);
1854		privptr->conn = NULL; privptr->fsm = NULL;
1855		/* privptr gets freed by free_netdev() */
1856	}
1857}
1858
1859/**
1860 * Initialize a net device. (Called from kernel in alloc_netdev())
1861 */
1862static const struct net_device_ops netiucv_netdev_ops = {
1863	.ndo_open		= netiucv_open,
1864	.ndo_stop		= netiucv_close,
1865	.ndo_get_stats		= netiucv_stats,
1866	.ndo_start_xmit		= netiucv_tx,
1867};
1868
1869static void netiucv_setup_netdevice(struct net_device *dev)
1870{
1871	dev->mtu	         = NETIUCV_MTU_DEFAULT;
1872	dev->min_mtu		 = 576;
1873	dev->max_mtu		 = NETIUCV_MTU_MAX;
1874	dev->needs_free_netdev   = true;
1875	dev->priv_destructor     = netiucv_free_netdevice;
1876	dev->hard_header_len     = NETIUCV_HDRLEN;
1877	dev->addr_len            = 0;
1878	dev->type                = ARPHRD_SLIP;
1879	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
1880	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
1881	dev->netdev_ops		 = &netiucv_netdev_ops;
1882}
1883
1884/**
1885 * Allocate and initialize everything of a net device.
1886 */
1887static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
1888{
1889	struct netiucv_priv *privptr;
1890	struct net_device *dev;
1891
1892	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
1893			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
1894	if (!dev)
1895		return NULL;
1896	rtnl_lock();
1897	if (dev_alloc_name(dev, dev->name) < 0)
1898		goto out_netdev;
1899
1900	privptr = netdev_priv(dev);
1901	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
1902				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
1903				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
1904	if (!privptr->fsm)
1905		goto out_netdev;
1906
1907	privptr->conn = netiucv_new_connection(dev, username, userdata);
1908	if (!privptr->conn) {
1909		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
1910		goto out_fsm;
1911	}
1912	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
1913	return dev;
1914
1915out_fsm:
1916	kfree_fsm(privptr->fsm);
1917out_netdev:
1918	rtnl_unlock();
1919	free_netdev(dev);
1920	return NULL;
1921}
1922
1923static ssize_t connection_store(struct device_driver *drv, const char *buf,
1924				size_t count)
1925{
1926	char username[9];
1927	char userdata[17];
1928	int rc;
1929	struct net_device *dev;
1930	struct netiucv_priv *priv;
1931	struct iucv_connection *cp;
1932
1933	IUCV_DBF_TEXT(trace, 3, __func__);
1934	rc = netiucv_check_user(buf, count, username, userdata);
1935	if (rc)
1936		return rc;
1937
1938	read_lock_bh(&iucv_connection_rwlock);
1939	list_for_each_entry(cp, &iucv_connection_list, list) {
1940		if (!strncmp(username, cp->userid, 9) &&
1941		    !strncmp(userdata, cp->userdata, 17)) {
1942			read_unlock_bh(&iucv_connection_rwlock);
1943			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
1944				"already exists\n", netiucv_printuser(cp));
1945			return -EEXIST;
1946		}
1947	}
1948	read_unlock_bh(&iucv_connection_rwlock);
1949
1950	dev = netiucv_init_netdevice(username, userdata);
1951	if (!dev) {
1952		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
1953		return -ENODEV;
1954	}
1955
1956	rc = netiucv_register_device(dev);
1957	if (rc) {
1958		rtnl_unlock();
1959		IUCV_DBF_TEXT_(setup, 2,
1960			"ret %d from netiucv_register_device\n", rc);
1961		goto out_free_ndev;
1962	}
1963
1964	/* sysfs magic */
1965	priv = netdev_priv(dev);
1966	SET_NETDEV_DEV(dev, priv->dev);
1967
1968	rc = register_netdevice(dev);
1969	rtnl_unlock();
1970	if (rc)
1971		goto out_unreg;
1972
1973	dev_info(priv->dev, "The IUCV interface to %s has been established "
1974			    "successfully\n",
1975		netiucv_printuser(priv->conn));
1976
1977	return count;
1978
1979out_unreg:
1980	netiucv_unregister_device(priv->dev);
1981out_free_ndev:
1982	netiucv_free_netdevice(dev);
1983	return rc;
1984}
1985static DRIVER_ATTR_WO(connection);
1986
1987static ssize_t remove_store(struct device_driver *drv, const char *buf,
1988			    size_t count)
1989{
1990	struct iucv_connection *cp;
1991	struct net_device *ndev;
1992	struct netiucv_priv *priv;
1993	struct device *dev;
1994	char name[IFNAMSIZ];
1995	const char *p;
1996	int i;
1997
1998	IUCV_DBF_TEXT(trace, 3, __func__);
1999
2000	if (count >= IFNAMSIZ)
2001		count = IFNAMSIZ - 1;
2002
2003	for (i = 0, p = buf; i < count && *p; i++, p++) {
2004		if (*p == '\n' || *p == ' ')
2005			/* trailing lf, grr */
2006			break;
2007		name[i] = *p;
2008	}
2009	name[i] = '\0';
2010
2011	read_lock_bh(&iucv_connection_rwlock);
2012	list_for_each_entry(cp, &iucv_connection_list, list) {
2013		ndev = cp->netdev;
2014		priv = netdev_priv(ndev);
2015		dev = priv->dev;
2016		if (strncmp(name, ndev->name, count))
2017			continue;
2018		read_unlock_bh(&iucv_connection_rwlock);
2019		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2020			dev_warn(dev, "The IUCV device is connected"
2021				" to %s and cannot be removed\n",
2022				priv->conn->userid);
2023			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2024			return -EPERM;
2025		}
2026		unregister_netdev(ndev);
2027		netiucv_unregister_device(dev);
2028		return count;
2029	}
2030	read_unlock_bh(&iucv_connection_rwlock);
2031	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2032	return -EINVAL;
2033}
2034static DRIVER_ATTR_WO(remove);
2035
2036static struct attribute * netiucv_drv_attrs[] = {
2037	&driver_attr_connection.attr,
2038	&driver_attr_remove.attr,
2039	NULL,
2040};
2041
2042static struct attribute_group netiucv_drv_attr_group = {
2043	.attrs = netiucv_drv_attrs,
2044};
2045
2046static const struct attribute_group *netiucv_drv_attr_groups[] = {
2047	&netiucv_drv_attr_group,
2048	NULL,
2049};
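
/*
 * Illustrative sketch (not part of the driver): the two driver
 * attributes above form the administrative interface. Writing a z/VM
 * user ID -- optionally followed by ".userdata" -- to 'connection'
 * creates a new iucv interface, and writing an interface name to
 * 'remove' deletes it again. A minimal user-space example, assuming
 * the usual sysfs layout under /sys/bus/iucv/drivers/netiucv/ and an
 * illustrative peer guest name VMGUEST1 / interface name iucv0:
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* Create a new iucv<n> interface peered with guest VMGUEST1. */
	if (sysfs_write("/sys/bus/iucv/drivers/netiucv/connection",
			"VMGUEST1\n"))
		perror("connection");

	/* ... bring the interface up and use it, then remove it again. */
	if (sysfs_write("/sys/bus/iucv/drivers/netiucv/remove", "iucv0\n"))
		perror("remove");
	return 0;
}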
2050
2051static void netiucv_banner(void)
2052{
2053	pr_info("driver initialized\n");
2054}
2055
2056static void __exit netiucv_exit(void)
2057{
2058	struct iucv_connection *cp;
2059	struct net_device *ndev;
2060	struct netiucv_priv *priv;
2061	struct device *dev;
2062
2063	IUCV_DBF_TEXT(trace, 3, __func__);
2064	while (!list_empty(&iucv_connection_list)) {
2065		cp = list_entry(iucv_connection_list.next,
2066				struct iucv_connection, list);
2067		ndev = cp->netdev;
2068		priv = netdev_priv(ndev);
2069		dev = priv->dev;
2070
2071		unregister_netdev(ndev);
2072		netiucv_unregister_device(dev);
2073	}
2074
2075	driver_unregister(&netiucv_driver);
2076	iucv_unregister(&netiucv_handler, 1);
2077	iucv_unregister_dbf_views();
2078
2079	pr_info("driver unloaded\n");
2080	return;
2081}
2082
2083static int __init netiucv_init(void)
2084{
2085	int rc;
2086
2087	rc = iucv_register_dbf_views();
2088	if (rc)
2089		goto out;
2090	rc = iucv_register(&netiucv_handler, 1);
2091	if (rc)
2092		goto out_dbf;
2093	IUCV_DBF_TEXT(trace, 3, __func__);
2094	netiucv_driver.groups = netiucv_drv_attr_groups;
2095	rc = driver_register(&netiucv_driver);
2096	if (rc) {
2097		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2098		goto out_iucv;
2099	}
2100
2101	netiucv_banner();
2102	return rc;
2103
2104out_iucv:
2105	iucv_unregister(&netiucv_handler, 1);
2106out_dbf:
2107	iucv_unregister_dbf_views();
2108out:
2109	return rc;
2110}
2111
2112module_init(netiucv_init);
2113module_exit(netiucv_exit);
2114MODULE_LICENSE("GPL");
v6.8
   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * IUCV network driver
   4 *
   5 * Copyright IBM Corp. 2001, 2009
   6 *
   7 * Author(s):
   8 *	Original netiucv driver:
   9 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
  10 *	Sysfs integration and all bugs therein:
  11 *		Cornelia Huck (cornelia.huck@de.ibm.com)
  12 *	PM functions:
  13 *		Ursula Braun (ursula.braun@de.ibm.com)
  14 *
  15 * Documentation used:
  16 *  the source of the original IUCV driver by:
  17 *    Stefan Hegewald <hegewald@de.ibm.com>
  18 *    Hartmut Penner <hpenner@de.ibm.com>
  19 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
  20 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
  21 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
  22 */
  23
  24#define KMSG_COMPONENT "netiucv"
  25#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  26
  27#undef DEBUG
  28
  29#include <linux/module.h>
  30#include <linux/init.h>
  31#include <linux/kernel.h>
  32#include <linux/slab.h>
  33#include <linux/errno.h>
  34#include <linux/types.h>
  35#include <linux/interrupt.h>
  36#include <linux/timer.h>
  37#include <linux/bitops.h>
  38
  39#include <linux/signal.h>
  40#include <linux/string.h>
  41#include <linux/device.h>
  42
  43#include <linux/ip.h>
  44#include <linux/if_arp.h>
  45#include <linux/tcp.h>
  46#include <linux/skbuff.h>
  47#include <linux/ctype.h>
  48#include <net/dst.h>
  49
  50#include <linux/io.h>
  51#include <linux/uaccess.h>
  52#include <asm/ebcdic.h>
  53
  54#include <net/iucv/iucv.h>
  55#include "fsm.h"
  56
  57MODULE_AUTHOR
  58    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
  59MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
  60
  61/*
  62 * Debug Facility stuff
  63 */
  64#define IUCV_DBF_SETUP_NAME "iucv_setup"
  65#define IUCV_DBF_SETUP_LEN 64
  66#define IUCV_DBF_SETUP_PAGES 2
  67#define IUCV_DBF_SETUP_NR_AREAS 1
  68#define IUCV_DBF_SETUP_LEVEL 3
  69
  70#define IUCV_DBF_DATA_NAME "iucv_data"
  71#define IUCV_DBF_DATA_LEN 128
  72#define IUCV_DBF_DATA_PAGES 2
  73#define IUCV_DBF_DATA_NR_AREAS 1
  74#define IUCV_DBF_DATA_LEVEL 2
  75
  76#define IUCV_DBF_TRACE_NAME "iucv_trace"
  77#define IUCV_DBF_TRACE_LEN 16
  78#define IUCV_DBF_TRACE_PAGES 4
  79#define IUCV_DBF_TRACE_NR_AREAS 1
  80#define IUCV_DBF_TRACE_LEVEL 3
  81
  82#define IUCV_DBF_TEXT(name,level,text) \
  83	do { \
  84		debug_text_event(iucv_dbf_##name,level,text); \
  85	} while (0)
  86
  87#define IUCV_DBF_HEX(name,level,addr,len) \
  88	do { \
  89		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
  90	} while (0)
  91
  92DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
  93
  94#define IUCV_DBF_TEXT_(name, level, text...) \
  95	do { \
  96		if (debug_level_enabled(iucv_dbf_##name, level)) { \
  97			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
  98			sprintf(__buf, text); \
  99			debug_text_event(iucv_dbf_##name, level, __buf); \
 100			put_cpu_var(iucv_dbf_txt_buf); \
 101		} \
 102	} while (0)
 103
 104#define IUCV_DBF_SPRINTF(name,level,text...) \
 105	do { \
 106		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
 107		debug_sprintf_event(iucv_dbf_trace, level, text ); \
 108	} while (0)
 109
 110/*
 111 * some more debug stuff
 112 */
 113#define PRINTK_HEADER " iucv: "       /* for debugging */
 114
 115static struct device_driver netiucv_driver = {
 116	.owner = THIS_MODULE,
 117	.name = "netiucv",
 118	.bus  = &iucv_bus,
 119};
 120
 121/*
 122 * Per connection profiling data
 123 */
 124struct connection_profile {
 125	unsigned long maxmulti;
 126	unsigned long maxcqueue;
 127	unsigned long doios_single;
 128	unsigned long doios_multi;
 129	unsigned long txlen;
 130	unsigned long tx_time;
 131	unsigned long send_stamp;
 132	unsigned long tx_pending;
 133	unsigned long tx_max_pending;
 134};
 135
 136/*
 137 * Representation of one iucv connection
 138 */
 139struct iucv_connection {
 140	struct list_head	  list;
 141	struct iucv_path	  *path;
 142	struct sk_buff            *rx_buff;
 143	struct sk_buff            *tx_buff;
 144	struct sk_buff_head       collect_queue;
 145	struct sk_buff_head	  commit_queue;
 146	spinlock_t                collect_lock;
 147	int                       collect_len;
 148	int                       max_buffsize;
 149	fsm_timer                 timer;
 150	fsm_instance              *fsm;
 151	struct net_device         *netdev;
 152	struct connection_profile prof;
 153	char                      userid[9];
 154	char			  userdata[17];
 155};
 156
 157/*
 158 * Linked list of all connection structs.
 159 */
 160static LIST_HEAD(iucv_connection_list);
 161static DEFINE_RWLOCK(iucv_connection_rwlock);
 162
 163/*
 164 * Representation of event-data for the
 165 * connection state machine.
 166 */
 167struct iucv_event {
 168	struct iucv_connection *conn;
 169	void                   *data;
 170};
 171
 172/*
 173 * Private part of the network device structure
 174 */
 175struct netiucv_priv {
 176	struct net_device_stats stats;
 177	unsigned long           tbusy;
 178	fsm_instance            *fsm;
 179	struct iucv_connection  *conn;
 180	struct device           *dev;
 181};
 182
 183/*
 184 * Link level header for a packet.
 185 */
 186struct ll_header {
 187	u16 next;
 188};
 189
 190#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
 191#define NETIUCV_BUFSIZE_MAX	 65537
 192#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
 193#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
 194#define NETIUCV_MTU_DEFAULT      9216
 195#define NETIUCV_QUEUELEN_DEFAULT 50
 196#define NETIUCV_TIMEOUT_5SEC     5000
 197
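
/*
 * Illustrative sketch (not part of the driver): packets are packed
 * into the IUCV send/receive buffers back to back, each preceded by
 * a struct ll_header whose 'next' field holds the offset of the
 * following header counted from the start of the buffer; a final
 * header with next == 0 terminates the buffer (see
 * conn_action_txdone() and netiucv_unpack_skb() below). A small
 * stand-alone demonstration of that framing, using a plain uint16_t
 * in host byte order, just as the driver copies the header verbatim:
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HDRLEN 2	/* sizeof(struct ll_header) */

static size_t pack(uint8_t *buf, const char **pkts, int n)
{
	uint16_t next;
	size_t len = 0;
	int i;

	for (i = 0; i < n; i++) {
		size_t plen = strlen(pkts[i]);

		/* offset of the next header from the buffer start */
		next = len + HDRLEN + plen;
		memcpy(buf + len, &next, HDRLEN);
		memcpy(buf + len + HDRLEN, pkts[i], plen);
		len = next;
	}
	next = 0;			/* terminating header */
	memcpy(buf + len, &next, HDRLEN);
	return len + HDRLEN;
}

int main(void)
{
	const char *pkts[] = { "first", "second packet" };
	uint8_t buf[128];
	size_t off = 0;
	uint16_t next;

	pack(buf, pkts, 2);

	/* Walk the buffer the way netiucv_unpack_skb() does. */
	for (;;) {
		memcpy(&next, buf + off, HDRLEN);
		if (!next)
			break;
		printf("%u payload bytes at offset %zu\n",
		       (unsigned int)(next - off - HDRLEN), off + HDRLEN);
		off = next;
	}
	return 0;
}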
 198/*
 199 * Helper functions for busy handling
 200 * of network devices.
 201 */
 202static void netiucv_clear_busy(struct net_device *dev)
 203{
 204	struct netiucv_priv *priv = netdev_priv(dev);
 205	clear_bit(0, &priv->tbusy);
 206	netif_wake_queue(dev);
 207}
 208
 209static int netiucv_test_and_set_busy(struct net_device *dev)
 210{
 211	struct netiucv_priv *priv = netdev_priv(dev);
 212	netif_stop_queue(dev);
 213	return test_and_set_bit(0, &priv->tbusy);
 214}
 215
 216static u8 iucvMagic_ascii[16] = {
 217	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
 218	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
 219};
 220
 221static u8 iucvMagic_ebcdic[16] = {
 222	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
 223	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
 224};
 225
 226/*
 227 * Convert an iucv userId to its printable
 228 * form (strip whitespace at end).
 229 *
 230 * @param An iucv userId
 231 *
 232 * @returns The printable string (static data!!)
 233 */
 234static char *netiucv_printname(char *name, int len)
 235{
 236	static char tmp[17];
 237	char *p = tmp;
 238	memcpy(tmp, name, len);
 239	tmp[len] = '\0';
 240	while (*p && ((p - tmp) < len) && (!isspace(*p)))
 241		p++;
 242	*p = '\0';
 243	return tmp;
 244}
 245
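/*
 * Build a printable "userid.userdata" representation of a connection.
 * If the connection still uses the default iucvMagic userdata, only
 * the user ID is returned. Like netiucv_printname() this returns
 * static data and is not reentrant.
 */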
 246static char *netiucv_printuser(struct iucv_connection *conn)
 247{
 248	static char tmp_uid[9];
 249	static char tmp_udat[17];
 250	static char buf[100];
 251
 252	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
 253		tmp_uid[8] = '\0';
 254		tmp_udat[16] = '\0';
 255		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
 256		memcpy(tmp_udat, conn->userdata, 16);
 257		EBCASC(tmp_udat, 16);
 258		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
 259		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
 260		return buf;
 261	} else
 262		return netiucv_printname(conn->userid, 8);
 263}
 264
 265/*
 266 * States of the interface statemachine.
 267 */
 268enum dev_states {
 269	DEV_STATE_STOPPED,
 270	DEV_STATE_STARTWAIT,
 271	DEV_STATE_STOPWAIT,
 272	DEV_STATE_RUNNING,
 273	/*
 274	 * MUST always be the last element!!
 275	 */
 276	NR_DEV_STATES
 277};
 278
 279static const char *dev_state_names[] = {
 280	"Stopped",
 281	"StartWait",
 282	"StopWait",
 283	"Running",
 284};
 285
 286/*
 287 * Events of the interface statemachine.
 288 */
 289enum dev_events {
 290	DEV_EVENT_START,
 291	DEV_EVENT_STOP,
 292	DEV_EVENT_CONUP,
 293	DEV_EVENT_CONDOWN,
 294	/*
 295	 * MUST always be the last element!!
 296	 */
 297	NR_DEV_EVENTS
 298};
 299
 300static const char *dev_event_names[] = {
 301	"Start",
 302	"Stop",
 303	"Connection up",
 304	"Connection down",
 305};
 306
 307/*
 308 * Events of the connection statemachine
 309 */
 310enum conn_events {
 311	/*
 312	 * Events, representing callbacks from
 313	 * lowlevel iucv layer
 314	 */
 315	CONN_EVENT_CONN_REQ,
 316	CONN_EVENT_CONN_ACK,
 317	CONN_EVENT_CONN_REJ,
 318	CONN_EVENT_CONN_SUS,
 319	CONN_EVENT_CONN_RES,
 320	CONN_EVENT_RX,
 321	CONN_EVENT_TXDONE,
 322
 323	/*
 324	 * Events, representing error return codes from
 325	 * calls to lowlevel iucv layer
 326	 */
 327
 328	/*
 329	 * Event, representing timer expiry.
 330	 */
 331	CONN_EVENT_TIMER,
 332
 333	/*
 334	 * Events, representing commands from upper levels.
 335	 */
 336	CONN_EVENT_START,
 337	CONN_EVENT_STOP,
 338
 339	/*
 340	 * MUST always be the last element!!
 341	 */
 342	NR_CONN_EVENTS,
 343};
 344
 345static const char *conn_event_names[] = {
 346	"Remote connection request",
 347	"Remote connection acknowledge",
 348	"Remote connection reject",
 349	"Connection suspended",
 350	"Connection resumed",
 351	"Data received",
 352	"Data sent",
 353
 354	"Timer",
 355
 356	"Start",
 357	"Stop",
 358};
 359
 360/*
 361 * States of the connection statemachine.
 362 */
 363enum conn_states {
 364	/*
 365	 * Connection not assigned to any device,
 366	 * initial state, invalid
 367	 */
 368	CONN_STATE_INVALID,
 369
 370	/*
 371	 * Userid assigned but not operating
 372	 */
 373	CONN_STATE_STOPPED,
 374
 375	/*
 376	 * Connection registered,
 377	 * no connection request sent yet,
 378	 * no connection request received
 379	 */
 380	CONN_STATE_STARTWAIT,
 381
 382	/*
 383	 * Connection registered and connection request sent,
 384	 * no acknowledge and no connection request received yet.
 385	 */
 386	CONN_STATE_SETUPWAIT,
 387
 388	/*
 389	 * Connection up and running idle
 390	 */
 391	CONN_STATE_IDLE,
 392
 393	/*
 394	 * Data sent, awaiting CONN_EVENT_TXDONE
 395	 */
 396	CONN_STATE_TX,
 397
 398	/*
 399	 * Error during registration.
 400	 */
 401	CONN_STATE_REGERR,
 402
 403	/*
 404	 * Error during connection setup.
 405	 */
 406	CONN_STATE_CONNERR,
 407
 408	/*
 409	 * MUST always be the last element!!
 410	 */
 411	NR_CONN_STATES,
 412};
 413
 414static const char *conn_state_names[] = {
 415	"Invalid",
 416	"Stopped",
 417	"StartWait",
 418	"SetupWait",
 419	"Idle",
 420	"TX",
 421	"Terminating",
 422	"Registration error",
 423	"Connect error",
 424};
 425
 426
 427/*
 428 * Debug Facility Stuff
 429 */
 430static debug_info_t *iucv_dbf_setup = NULL;
 431static debug_info_t *iucv_dbf_data = NULL;
 432static debug_info_t *iucv_dbf_trace = NULL;
 433
 434DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
 435
 436static void iucv_unregister_dbf_views(void)
 437{
 438	debug_unregister(iucv_dbf_setup);
 439	debug_unregister(iucv_dbf_data);
 440	debug_unregister(iucv_dbf_trace);
 441}
 442static int iucv_register_dbf_views(void)
 443{
 444	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
 445					IUCV_DBF_SETUP_PAGES,
 446					IUCV_DBF_SETUP_NR_AREAS,
 447					IUCV_DBF_SETUP_LEN);
 448	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
 449				       IUCV_DBF_DATA_PAGES,
 450				       IUCV_DBF_DATA_NR_AREAS,
 451				       IUCV_DBF_DATA_LEN);
 452	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
 453					IUCV_DBF_TRACE_PAGES,
 454					IUCV_DBF_TRACE_NR_AREAS,
 455					IUCV_DBF_TRACE_LEN);
 456
 457	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
 458	    (iucv_dbf_trace == NULL)) {
 459		iucv_unregister_dbf_views();
 460		return -ENOMEM;
 461	}
 462	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
 463	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
 464
 465	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
 466	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
 467
 468	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
 469	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
 470
 471	return 0;
 472}
 473
 474/*
 475 * Callback-wrappers, called from lowlevel iucv layer.
 476 */
 477
 478static void netiucv_callback_rx(struct iucv_path *path,
 479				struct iucv_message *msg)
 480{
 481	struct iucv_connection *conn = path->private;
 482	struct iucv_event ev;
 483
 484	ev.conn = conn;
 485	ev.data = msg;
 486	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
 487}
 488
 489static void netiucv_callback_txdone(struct iucv_path *path,
 490				    struct iucv_message *msg)
 491{
 492	struct iucv_connection *conn = path->private;
 493	struct iucv_event ev;
 494
 495	ev.conn = conn;
 496	ev.data = msg;
 497	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
 498}
 499
 500static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 501{
 502	struct iucv_connection *conn = path->private;
 503
 504	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
 505}
 506
 507static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid,
 508				    u8 *ipuser)
 509{
 510	struct iucv_connection *conn = path->private;
 511	struct iucv_event ev;
 512	static char tmp_user[9];
 513	static char tmp_udat[17];
 514	int rc;
 515
 516	rc = -EINVAL;
 517	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
 518	memcpy(tmp_udat, ipuser, 16);
 519	EBCASC(tmp_udat, 16);
 520	read_lock_bh(&iucv_connection_rwlock);
 521	list_for_each_entry(conn, &iucv_connection_list, list) {
 522		if (strncmp(ipvmid, conn->userid, 8) ||
 523		    strncmp(ipuser, conn->userdata, 16))
 524			continue;
 525		/* Found a matching connection for this path. */
 526		conn->path = path;
 527		ev.conn = conn;
 528		ev.data = path;
 529		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
 530		rc = 0;
 531	}
 532	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
 533		       tmp_user, netiucv_printname(tmp_udat, 16));
 534	read_unlock_bh(&iucv_connection_rwlock);
 535	return rc;
 536}
 537
 538static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser)
 539{
 540	struct iucv_connection *conn = path->private;
 541
 542	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
 543}
 544
 545static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser)
 546{
 547	struct iucv_connection *conn = path->private;
 548
 549	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
 550}
 551
 552static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
 553{
 554	struct iucv_connection *conn = path->private;
 555
 556	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
 557}
 558
 559/*
 560 * NOP action for statemachines
 561 */
 562static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
 563{
 564}
 565
 566/*
 567 * Actions of the connection statemachine
 568 */
 569
 570/*
 571 * netiucv_unpack_skb
 572 * @conn: The connection where this skb has been received.
 573 * @pskb: The received skb.
 574 *
 575 * Unpack a just received skb and hand it over to upper layers.
 576 * Helper function for conn_action_rx.
 577 */
 578static void netiucv_unpack_skb(struct iucv_connection *conn,
 579			       struct sk_buff *pskb)
 580{
 581	struct net_device     *dev = conn->netdev;
 582	struct netiucv_priv   *privptr = netdev_priv(dev);
 583	u16 offset = 0;
 584
 585	skb_put(pskb, NETIUCV_HDRLEN);
 586	pskb->dev = dev;
 587	pskb->ip_summed = CHECKSUM_NONE;
 588	pskb->protocol = cpu_to_be16(ETH_P_IP);
 589
 590	while (1) {
 591		struct sk_buff *skb;
 592		struct ll_header *header = (struct ll_header *) pskb->data;
 593
 594		if (!header->next)
 595			break;
 596
 597		skb_pull(pskb, NETIUCV_HDRLEN);
 598		header->next -= offset;
 599		offset += header->next;
 600		header->next -= NETIUCV_HDRLEN;
 601		if (skb_tailroom(pskb) < header->next) {
 602			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
 603				header->next, skb_tailroom(pskb));
 604			return;
 605		}
 606		skb_put(pskb, header->next);
 607		skb_reset_mac_header(pskb);
 608		skb = dev_alloc_skb(pskb->len);
 609		if (!skb) {
 610			IUCV_DBF_TEXT(data, 2,
 611				"Out of memory in netiucv_unpack_skb\n");
 612			privptr->stats.rx_dropped++;
 613			return;
 614		}
 615		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
 616					  pskb->len);
 617		skb_reset_mac_header(skb);
 618		skb->dev = pskb->dev;
 619		skb->protocol = pskb->protocol;
 620		pskb->ip_summed = CHECKSUM_UNNECESSARY;
 621		privptr->stats.rx_packets++;
 622		privptr->stats.rx_bytes += skb->len;
 623		netif_rx(skb);
 624		skb_pull(pskb, header->next);
 625		skb_put(pskb, NETIUCV_HDRLEN);
 626	}
 627}
 628
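/*
 * conn_action_rx
 * Handle a message_pending callback: reject the message if the
 * connection has no net_device or the message is larger than
 * max_buffsize, otherwise receive it into the preallocated rx_buff
 * and hand it to netiucv_unpack_skb() for delivery to the stack.
 */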
 629static void conn_action_rx(fsm_instance *fi, int event, void *arg)
 630{
 631	struct iucv_event *ev = arg;
 632	struct iucv_connection *conn = ev->conn;
 633	struct iucv_message *msg = ev->data;
 634	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
 635	int rc;
 636
 637	IUCV_DBF_TEXT(trace, 4, __func__);
 638
 639	if (!conn->netdev) {
 640		iucv_message_reject(conn->path, msg);
 641		IUCV_DBF_TEXT(data, 2,
 642			      "Received data for unlinked connection\n");
 643		return;
 644	}
 645	if (msg->length > conn->max_buffsize) {
 646		iucv_message_reject(conn->path, msg);
 647		privptr->stats.rx_dropped++;
 648		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
 649			       msg->length, conn->max_buffsize);
 650		return;
 651	}
 652	conn->rx_buff->data = conn->rx_buff->head;
 653	skb_reset_tail_pointer(conn->rx_buff);
 654	conn->rx_buff->len = 0;
 655	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
 656				  msg->length, NULL);
 657	if (rc || msg->length < 5) {
 658		privptr->stats.rx_errors++;
 659		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
 660		return;
 661	}
 662	netiucv_unpack_skb(conn, conn->rx_buff);
 663}
 664
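/*
 * conn_action_txdone
 * Handle a message_complete callback: drop the just-confirmed skb
 * from the commit queue, then drain the collect queue into tx_buff
 * and send the accumulated packets as a single multi-packet IUCV
 * message. If nothing was collected, the connection returns to the
 * IDLE state.
 */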
 665static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
 666{
 667	struct iucv_event *ev = arg;
 668	struct iucv_connection *conn = ev->conn;
 669	struct iucv_message *msg = ev->data;
 670	struct iucv_message txmsg;
 671	struct netiucv_priv *privptr = NULL;
 672	u32 single_flag = msg->tag;
 673	u32 txbytes = 0;
 674	u32 txpackets = 0;
 675	u32 stat_maxcq = 0;
 676	struct sk_buff *skb;
 677	unsigned long saveflags;
 678	struct ll_header header;
 679	int rc;
 680
 681	IUCV_DBF_TEXT(trace, 4, __func__);
 682
 683	if (!conn || !conn->netdev) {
 684		IUCV_DBF_TEXT(data, 2,
 685			      "Send confirmation for unlinked connection\n");
 686		return;
 687	}
 688	privptr = netdev_priv(conn->netdev);
 689	conn->prof.tx_pending--;
 690	if (single_flag) {
 691		if ((skb = skb_dequeue(&conn->commit_queue))) {
 692			refcount_dec(&skb->users);
 693			if (privptr) {
 694				privptr->stats.tx_packets++;
 695				privptr->stats.tx_bytes +=
 696					(skb->len - NETIUCV_HDRLEN
 697						  - NETIUCV_HDRLEN);
 698			}
 699			dev_kfree_skb_any(skb);
 700		}
 701	}
 702	conn->tx_buff->data = conn->tx_buff->head;
 703	skb_reset_tail_pointer(conn->tx_buff);
 704	conn->tx_buff->len = 0;
 705	spin_lock_irqsave(&conn->collect_lock, saveflags);
 706	while ((skb = skb_dequeue(&conn->collect_queue))) {
 707		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
 708		skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
 709		skb_copy_from_linear_data(skb,
 710					  skb_put(conn->tx_buff, skb->len),
 711					  skb->len);
 712		txbytes += skb->len;
 713		txpackets++;
 714		stat_maxcq++;
 715		refcount_dec(&skb->users);
 716		dev_kfree_skb_any(skb);
 717	}
 718	if (conn->collect_len > conn->prof.maxmulti)
 719		conn->prof.maxmulti = conn->collect_len;
 720	conn->collect_len = 0;
 721	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
 722	if (conn->tx_buff->len == 0) {
 723		fsm_newstate(fi, CONN_STATE_IDLE);
 724		return;
 725	}
 726
 727	header.next = 0;
 728	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
 729	conn->prof.send_stamp = jiffies;
 730	txmsg.class = 0;
 731	txmsg.tag = 0;
 732	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
 733			       conn->tx_buff->data, conn->tx_buff->len);
 734	conn->prof.doios_multi++;
 735	conn->prof.txlen += conn->tx_buff->len;
 736	conn->prof.tx_pending++;
 737	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
 738		conn->prof.tx_max_pending = conn->prof.tx_pending;
 739	if (rc) {
 740		conn->prof.tx_pending--;
 741		fsm_newstate(fi, CONN_STATE_IDLE);
 742		if (privptr)
 743			privptr->stats.tx_errors += txpackets;
 744		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
 745	} else {
 746		if (privptr) {
 747			privptr->stats.tx_packets += txpackets;
 748			privptr->stats.tx_bytes += txbytes;
 749		}
 750		if (stat_maxcq > conn->prof.maxcqueue)
 751			conn->prof.maxcqueue = stat_maxcq;
 752	}
 753}
 754
 755static struct iucv_handler netiucv_handler = {
 756	.path_pending	  = netiucv_callback_connreq,
 757	.path_complete	  = netiucv_callback_connack,
 758	.path_severed	  = netiucv_callback_connrej,
 759	.path_quiesced	  = netiucv_callback_connsusp,
 760	.path_resumed	  = netiucv_callback_connres,
 761	.message_pending  = netiucv_callback_rx,
 762	.message_complete = netiucv_callback_txdone,
 763};
 764
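/*
 * conn_action_connaccept
 * Accept an incoming path request from the peer, take over the path
 * and signal DEV_EVENT_CONUP to the interface state machine.
 */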
 765static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
 766{
 767	struct iucv_event *ev = arg;
 768	struct iucv_connection *conn = ev->conn;
 769	struct iucv_path *path = ev->data;
 770	struct net_device *netdev = conn->netdev;
 771	struct netiucv_priv *privptr = netdev_priv(netdev);
 772	int rc;
 773
 774	IUCV_DBF_TEXT(trace, 3, __func__);
 775
 776	conn->path = path;
 777	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
 778	path->flags = 0;
 779	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
 780	if (rc) {
 781		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
 782		return;
 783	}
 784	fsm_newstate(fi, CONN_STATE_IDLE);
 785	netdev->tx_queue_len = conn->path->msglim;
 786	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 787}
 788
 789static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
 790{
 791	struct iucv_event *ev = arg;
 792	struct iucv_path *path = ev->data;
 793
 794	IUCV_DBF_TEXT(trace, 3, __func__);
 795	iucv_path_sever(path, NULL);
 796}
 797
 798static void conn_action_connack(fsm_instance *fi, int event, void *arg)
 799{
 800	struct iucv_connection *conn = arg;
 801	struct net_device *netdev = conn->netdev;
 802	struct netiucv_priv *privptr = netdev_priv(netdev);
 803
 804	IUCV_DBF_TEXT(trace, 3, __func__);
 805	fsm_deltimer(&conn->timer);
 806	fsm_newstate(fi, CONN_STATE_IDLE);
 807	netdev->tx_queue_len = conn->path->msglim;
 808	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
 809}
 810
 811static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
 812{
 813	struct iucv_connection *conn = arg;
 814
 815	IUCV_DBF_TEXT(trace, 3, __func__);
 816	fsm_deltimer(&conn->timer);
 817	iucv_path_sever(conn->path, conn->userdata);
 818	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 819}
 820
 821static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
 822{
 823	struct iucv_connection *conn = arg;
 824	struct net_device *netdev = conn->netdev;
 825	struct netiucv_priv *privptr = netdev_priv(netdev);
 826
 827	IUCV_DBF_TEXT(trace, 3, __func__);
 828
 829	fsm_deltimer(&conn->timer);
 830	iucv_path_sever(conn->path, conn->userdata);
 831	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
 832			       "connection\n", netiucv_printuser(conn));
 833	IUCV_DBF_TEXT(data, 2,
 834		      "conn_action_connsever: Remote dropped connection\n");
 835	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 836	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 837}
 838
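/*
 * conn_action_start
 * Allocate an IUCV path and try to connect to the configured peer.
 * The numeric return codes handled below are the results reported by
 * iucv_path_connect(); temporary failures fall back to STARTWAIT,
 * anything else moves the connection to CONN_STATE_CONNERR.
 */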
 839static void conn_action_start(fsm_instance *fi, int event, void *arg)
 840{
 841	struct iucv_connection *conn = arg;
 842	struct net_device *netdev = conn->netdev;
 843	struct netiucv_priv *privptr = netdev_priv(netdev);
 844	int rc;
 845
 846	IUCV_DBF_TEXT(trace, 3, __func__);
 847
 848	fsm_newstate(fi, CONN_STATE_STARTWAIT);
 849
 850	/*
 851	 * We must set the state before calling iucv_connect because the
 852	 * callback handler could be called at any point after the connection
 853	 * request is sent
 854	 */
 855
 856	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
 857	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
 858	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
 859		netdev->name, netiucv_printuser(conn));
 860
 861	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
 862			       NULL, conn->userdata, conn);
 863	switch (rc) {
 864	case 0:
 865		netdev->tx_queue_len = conn->path->msglim;
 866		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
 867			     CONN_EVENT_TIMER, conn);
 868		return;
 869	case 11:
 870		dev_warn(privptr->dev,
 871			"The IUCV device failed to connect to z/VM guest %s\n",
 872			netiucv_printname(conn->userid, 8));
 873		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 874		break;
 875	case 12:
 876		dev_warn(privptr->dev,
 877			"The IUCV device failed to connect to the peer on z/VM"
 878			" guest %s\n", netiucv_printname(conn->userid, 8));
 879		fsm_newstate(fi, CONN_STATE_STARTWAIT);
 880		break;
 881	case 13:
 882		dev_err(privptr->dev,
 883			"Connecting the IUCV device would exceed the maximum"
 884			" number of IUCV connections\n");
 885		fsm_newstate(fi, CONN_STATE_CONNERR);
 886		break;
 887	case 14:
 888		dev_err(privptr->dev,
 889			"z/VM guest %s has too many IUCV connections"
 890			" to connect with the IUCV device\n",
 891			netiucv_printname(conn->userid, 8));
 892		fsm_newstate(fi, CONN_STATE_CONNERR);
 893		break;
 894	case 15:
 895		dev_err(privptr->dev,
 896			"The IUCV device cannot connect to a z/VM guest with no"
 897			" IUCV authorization\n");
 898		fsm_newstate(fi, CONN_STATE_CONNERR);
 899		break;
 900	default:
 901		dev_err(privptr->dev,
 902			"Connecting the IUCV device failed with error %d\n",
 903			rc);
 904		fsm_newstate(fi, CONN_STATE_CONNERR);
 905		break;
 906	}
 907	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
 908	kfree(conn->path);
 909	conn->path = NULL;
 910}
 911
 912static void netiucv_purge_skb_queue(struct sk_buff_head *q)
 913{
 914	struct sk_buff *skb;
 915
 916	while ((skb = skb_dequeue(q))) {
 917		refcount_dec(&skb->users);
 918		dev_kfree_skb_any(skb);
 919	}
 920}
 921
 922static void conn_action_stop(fsm_instance *fi, int event, void *arg)
 923{
 924	struct iucv_event *ev = arg;
 925	struct iucv_connection *conn = ev->conn;
 926	struct net_device *netdev = conn->netdev;
 927	struct netiucv_priv *privptr = netdev_priv(netdev);
 928
 929	IUCV_DBF_TEXT(trace, 3, __func__);
 930
 931	fsm_deltimer(&conn->timer);
 932	fsm_newstate(fi, CONN_STATE_STOPPED);
 933	netiucv_purge_skb_queue(&conn->collect_queue);
 934	if (conn->path) {
 935		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
 936		iucv_path_sever(conn->path, conn->userdata);
 937		kfree(conn->path);
 938		conn->path = NULL;
 939	}
 940	netiucv_purge_skb_queue(&conn->commit_queue);
 941	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
 942}
 943
 944static void conn_action_inval(fsm_instance *fi, int event, void *arg)
 945{
 946	struct iucv_connection *conn = arg;
 947	struct net_device *netdev = conn->netdev;
 948
 949	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
 950		netdev->name, conn->userid);
 951}
 952
 953static const fsm_node conn_fsm[] = {
 954	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
 955	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
 956
 957	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
 958	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
 959	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
 960	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
 961	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
 962	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
 963	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
 964
 965	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
 966	{ CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
 967	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
 968	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
 969	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
 970
 971	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
 972	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
 973
 974	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
 975	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
 976	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
 977
 978	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
 979	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
 980
 981	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
 982	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
 983};
 984
 985static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
 986
 987
 988/*
 989 * Actions for interface - statemachine.
 990 */
 991
 992/*
 993 * dev_action_start
 994 * @fi: An instance of an interface statemachine.
 995 * @event: The event that just happened.
 996 * @arg: Generic pointer, cast from struct net_device * upon call.
 997 *
 998 * Start up the connection by sending CONN_EVENT_START to it.
 999 */
1000static void dev_action_start(fsm_instance *fi, int event, void *arg)
1001{
1002	struct net_device   *dev = arg;
1003	struct netiucv_priv *privptr = netdev_priv(dev);
1004
1005	IUCV_DBF_TEXT(trace, 3, __func__);
1006
1007	fsm_newstate(fi, DEV_STATE_STARTWAIT);
1008	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1009}
1010
1011/*
1012 * Shut down the connection by sending CONN_EVENT_STOP to it.
1013 *
1014 * @param fi    An instance of an interface statemachine.
1015 * @param event The event that just happened.
1016 * @param arg   Generic pointer, cast from struct net_device * upon call.
1017 */
1018static void
1019dev_action_stop(fsm_instance *fi, int event, void *arg)
1020{
1021	struct net_device   *dev = arg;
1022	struct netiucv_priv *privptr = netdev_priv(dev);
1023	struct iucv_event   ev;
1024
1025	IUCV_DBF_TEXT(trace, 3, __func__);
1026
1027	ev.conn = privptr->conn;
1028
1029	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1030	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1031}
1032
1033/*
1034 * Called from connection statemachine
1035 * when a connection is up and running.
1036 *
1037 * @param fi    An instance of an interface statemachine.
1038 * @param event The event that just happened.
1039 * @param arg   Generic pointer, cast from struct net_device * upon call.
1040 */
1041static void
1042dev_action_connup(fsm_instance *fi, int event, void *arg)
1043{
1044	struct net_device   *dev = arg;
1045	struct netiucv_priv *privptr = netdev_priv(dev);
1046
1047	IUCV_DBF_TEXT(trace, 3, __func__);
1048
1049	switch (fsm_getstate(fi)) {
1050		case DEV_STATE_STARTWAIT:
1051			fsm_newstate(fi, DEV_STATE_RUNNING);
1052			dev_info(privptr->dev,
1053				"The IUCV device has been connected"
1054				" successfully to %s\n",
1055				netiucv_printuser(privptr->conn));
1056			IUCV_DBF_TEXT(setup, 3,
1057				"connection is up and running\n");
1058			break;
1059		case DEV_STATE_STOPWAIT:
1060			IUCV_DBF_TEXT(data, 2,
1061				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1062			break;
1063	}
1064}
1065
1066/*
1067 * Called from connection statemachine
1068 * when a connection has been shut down.
1069 *
1070 * @param fi    An instance of an interface statemachine.
1071 * @param event The event that just happened.
1072 * @param arg   Generic pointer, cast from struct net_device * upon call.
1073 */
1074static void
1075dev_action_conndown(fsm_instance *fi, int event, void *arg)
1076{
1077	IUCV_DBF_TEXT(trace, 3, __func__);
1078
1079	switch (fsm_getstate(fi)) {
1080		case DEV_STATE_RUNNING:
1081			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1082			break;
1083		case DEV_STATE_STOPWAIT:
1084			fsm_newstate(fi, DEV_STATE_STOPPED);
1085			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1086			break;
1087	}
1088}
1089
1090static const fsm_node dev_fsm[] = {
1091	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },
1092
1093	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
1094	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },
1095
1096	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
1097	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },
1098
1099	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
1100	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
1101	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
1102};
1103
1104static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1105
1106/*
1107 * Transmit a packet.
1108 * This is a helper function for netiucv_tx().
1109 *
1110 * @param conn Connection to be used for sending.
1111 * @param skb Pointer to struct sk_buff of packet to send.
1112 *            The linklevel header has already been set up
1113 *            by netiucv_tx().
1114 *
1115 * @return 0 on success, -ERRNO on failure.
1116 */
1117static int netiucv_transmit_skb(struct iucv_connection *conn,
1118				struct sk_buff *skb)
1119{
1120	struct iucv_message msg;
1121	unsigned long saveflags;
1122	struct ll_header header;
1123	int rc;
1124
1125	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1126		int l = skb->len + NETIUCV_HDRLEN;
1127
1128		spin_lock_irqsave(&conn->collect_lock, saveflags);
1129		if (conn->collect_len + l >
1130		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1131			rc = -EBUSY;
1132			IUCV_DBF_TEXT(data, 2,
1133				      "EBUSY from netiucv_transmit_skb\n");
1134		} else {
1135			refcount_inc(&skb->users);
1136			skb_queue_tail(&conn->collect_queue, skb);
1137			conn->collect_len += l;
1138			rc = 0;
1139		}
1140		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1141	} else {
1142		struct sk_buff *nskb = skb;
1143		/*
1144		 * Copy the skb to a new allocated skb in lowmem only if the
1145		 * data is located above 2G in memory or tailroom is < 2.
1146		 */
1147		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1148				    NETIUCV_HDRLEN)) >> 31;
1149		int copied = 0;
1150		if (hi || (skb_tailroom(skb) < 2)) {
1151			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1152					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1153			if (!nskb) {
1154				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1155				rc = -ENOMEM;
1156				return rc;
1157			} else {
1158				skb_reserve(nskb, NETIUCV_HDRLEN);
1159				skb_put_data(nskb, skb->data, skb->len);
1160			}
1161			copied = 1;
1162		}
1163		/*
1164		 * skb now is below 2G and has enough room. Add headers.
1165		 */
1166		header.next = nskb->len + NETIUCV_HDRLEN;
1167		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1168		header.next = 0;
1169		skb_put_data(nskb, &header, NETIUCV_HDRLEN);
1170
1171		fsm_newstate(conn->fsm, CONN_STATE_TX);
1172		conn->prof.send_stamp = jiffies;
1173
1174		msg.tag = 1;
1175		msg.class = 0;
1176		rc = iucv_message_send(conn->path, &msg, 0, 0,
1177				       nskb->data, nskb->len);
1178		conn->prof.doios_single++;
1179		conn->prof.txlen += skb->len;
1180		conn->prof.tx_pending++;
1181		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1182			conn->prof.tx_max_pending = conn->prof.tx_pending;
1183		if (rc) {
1184			struct netiucv_priv *privptr;
1185			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1186			conn->prof.tx_pending--;
1187			privptr = netdev_priv(conn->netdev);
1188			if (privptr)
1189				privptr->stats.tx_errors++;
1190			if (copied)
1191				dev_kfree_skb(nskb);
1192			else {
1193				/*
1194				 * Remove our headers. They get added
1195				 * again on retransmit.
1196				 */
1197				skb_pull(skb, NETIUCV_HDRLEN);
1198				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1199			}
1200			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1201		} else {
1202			if (copied)
1203				dev_kfree_skb(skb);
1204			refcount_inc(&nskb->users);
1205			skb_queue_tail(&conn->commit_queue, nskb);
1206		}
1207	}
1208
1209	return rc;
1210}
1211
1212/*
1213 * Interface API for upper network layers
1214 */
1215
1216/*
1217 * Open an interface.
1218 * Called from generic network layer when ifconfig up is run.
1219 *
1220 * @param dev Pointer to interface struct.
1221 *
1222 * @return 0 on success, -ERRNO on failure. (Never fails.)
1223 */
1224static int netiucv_open(struct net_device *dev)
1225{
1226	struct netiucv_priv *priv = netdev_priv(dev);
1227
1228	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1229	return 0;
1230}
1231
1232/*
1233 * Close an interface.
1234 * Called from generic network layer when ifconfig down is run.
1235 *
1236 * @param dev Pointer to interface struct.
1237 *
1238 * @return 0 on success, -ERRNO on failure. (Never fails.)
1239 */
1240static int netiucv_close(struct net_device *dev)
1241{
1242	struct netiucv_priv *priv = netdev_priv(dev);
1243
1244	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1245	return 0;
1246}
1247
1248/*
1249 * Start transmission of a packet.
1250 * Called from generic network device layer.
1251 */
1252static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1253{
1254	struct netiucv_priv *privptr = netdev_priv(dev);
1255	int rc;
1256
1257	IUCV_DBF_TEXT(trace, 4, __func__);
1258	/*
1259	 * Some sanity checks ...
1260	 */
1261	if (skb == NULL) {
1262		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1263		privptr->stats.tx_dropped++;
1264		return NETDEV_TX_OK;
1265	}
1266	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1267		IUCV_DBF_TEXT(data, 2,
1268			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1269		dev_kfree_skb(skb);
1270		privptr->stats.tx_dropped++;
1271		return NETDEV_TX_OK;
1272	}
1273
1274	/*
1275	 * If connection is not running, try to restart it
1276	 * and throw away packet.
1277	 */
1278	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1279		dev_kfree_skb(skb);
1280		privptr->stats.tx_dropped++;
1281		privptr->stats.tx_errors++;
1282		privptr->stats.tx_carrier_errors++;
1283		return NETDEV_TX_OK;
1284	}
1285
1286	if (netiucv_test_and_set_busy(dev)) {
1287		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1288		return NETDEV_TX_BUSY;
1289	}
1290	netif_trans_update(dev);
1291	rc = netiucv_transmit_skb(privptr->conn, skb);
1292	netiucv_clear_busy(dev);
1293	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1294}
1295
1296/*
1297 * netiucv_stats
1298 * @dev: Pointer to interface struct.
1299 *
1300 * Returns interface statistics of a device.
1301 *
1302 * Returns pointer to stats struct of this interface.
1303 */
1304static struct net_device_stats *netiucv_stats (struct net_device * dev)
1305{
1306	struct netiucv_priv *priv = netdev_priv(dev);
1307
1308	IUCV_DBF_TEXT(trace, 5, __func__);
1309	return &priv->stats;
1310}
1311
1312/*
1313 * attributes in sysfs
1314 */
1315
1316static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1317			 char *buf)
1318{
1319	struct netiucv_priv *priv = dev_get_drvdata(dev);
1320
1321	IUCV_DBF_TEXT(trace, 5, __func__);
1322	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1323}
1324
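/*
 * Parse "username" or "username.userdata" from a sysfs write.
 * The user ID may contain up to 8 alphanumeric (or '$') characters
 * and is upper-cased and blank-padded; the optional userdata (up to
 * 16 characters) is upper-cased and converted to EBCDIC. Without
 * userdata, the default iucvMagic pattern is used.
 */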
1325static int netiucv_check_user(const char *buf, size_t count, char *username,
1326			      char *userdata)
1327{
1328	const char *p;
1329	int i;
1330
1331	p = strchr(buf, '.');
1332	if ((p && ((count > 26) ||
1333		   ((p - buf) > 8) ||
1334		   (buf + count - p > 18))) ||
1335	    (!p && (count > 9))) {
1336		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
1337		return -EINVAL;
1338	}
1339
1340	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
1341		if (isalnum(*p) || *p == '$') {
1342			username[i] = toupper(*p);
1343			continue;
1344		}
1345		if (*p == '\n')
1346			/* trailing lf, grr */
1347			break;
1348		IUCV_DBF_TEXT_(setup, 2,
1349			       "conn_write: invalid character %02x\n", *p);
1350		return -EINVAL;
1351	}
1352	while (i < 8)
1353		username[i++] = ' ';
1354	username[8] = '\0';
1355
1356	if (*p == '.') {
1357		p++;
1358		for (i = 0; i < 16 && *p; i++, p++) {
1359			if (*p == '\n')
1360				break;
1361			userdata[i] = toupper(*p);
1362		}
1363		while (i > 0 && i < 16)
1364			userdata[i++] = ' ';
1365	} else
1366		memcpy(userdata, iucvMagic_ascii, 16);
1367	userdata[16] = '\0';
1368	ASCEBC(userdata, 16);
1369
1370	return 0;
1371}
1372
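/*
 * Set the peer of this interface. Changing the user ID is refused
 * while the interface is up, and a user ID/userdata combination that
 * is already used by another connection is rejected with -EEXIST.
 */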
1373static ssize_t user_write(struct device *dev, struct device_attribute *attr,
1374			  const char *buf, size_t count)
1375{
1376	struct netiucv_priv *priv = dev_get_drvdata(dev);
1377	struct net_device *ndev = priv->conn->netdev;
1378	char	username[9];
1379	char	userdata[17];
1380	int	rc;
1381	struct iucv_connection *cp;
1382
1383	IUCV_DBF_TEXT(trace, 3, __func__);
1384	rc = netiucv_check_user(buf, count, username, userdata);
1385	if (rc)
1386		return rc;
1387
1388	if (memcmp(username, priv->conn->userid, 9) &&
1389	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
1390		/* username changed while the interface is active. */
1391		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
1392		return -EPERM;
1393	}
1394	read_lock_bh(&iucv_connection_rwlock);
1395	list_for_each_entry(cp, &iucv_connection_list, list) {
1396		if (!strncmp(username, cp->userid, 9) &&
1397		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
1398			read_unlock_bh(&iucv_connection_rwlock);
1399			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
1400				"already exists\n", netiucv_printuser(cp));
1401			return -EEXIST;
1402		}
1403	}
1404	read_unlock_bh(&iucv_connection_rwlock);
1405	memcpy(priv->conn->userid, username, 9);
1406	memcpy(priv->conn->userdata, userdata, 17);
1407	return count;
1408}
1409
1410static DEVICE_ATTR(user, 0644, user_show, user_write);
1411
1412static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1413			    char *buf)
1414{
1415	struct netiucv_priv *priv = dev_get_drvdata(dev);
1416
1417	IUCV_DBF_TEXT(trace, 5, __func__);
1418	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1419}
1420
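/*
 * Set the maximum IUCV buffer size for this connection. The value
 * must not exceed NETIUCV_BUFSIZE_MAX, must leave room for the
 * current MTU plus the link-level headers while the interface is
 * running, and may never drop below the 576-byte minimum MTU plus
 * headers. While the interface is down, the MTU is adjusted to match
 * the new buffer size.
 */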
1421static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1422			     const char *buf, size_t count)
1423{
1424	struct netiucv_priv *priv = dev_get_drvdata(dev);
1425	struct net_device *ndev = priv->conn->netdev;
1426	unsigned int bs1;
1427	int rc;
1428
1429	IUCV_DBF_TEXT(trace, 3, __func__);
1430	if (count >= 39)
1431		return -EINVAL;
1432
1433	rc = kstrtouint(buf, 0, &bs1);
1434
1435	if (rc == -EINVAL) {
1436		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n",
1437			buf);
1438		return -EINVAL;
1439	}
1440	if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) {
1441		IUCV_DBF_TEXT_(setup, 2,
1442			"buffer_write: buffer size %d too large\n",
1443			bs1);
1444		return -EINVAL;
1445	}
1446	if ((ndev->flags & IFF_RUNNING) &&
1447	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1448		IUCV_DBF_TEXT_(setup, 2,
1449			"buffer_write: buffer size %d too small\n",
1450			bs1);
1451		return -EINVAL;
1452	}
1453	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1454		IUCV_DBF_TEXT_(setup, 2,
1455			"buffer_write: buffer size %d too small\n",
1456			bs1);
1457		return -EINVAL;
1458	}
1459
1460	priv->conn->max_buffsize = bs1;
1461	if (!(ndev->flags & IFF_RUNNING))
1462		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1463
1464	return count;
1465
1466}
1467
1468static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1469
1470static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1471			     char *buf)
1472{
1473	struct netiucv_priv *priv = dev_get_drvdata(dev);
1474
1475	IUCV_DBF_TEXT(trace, 5, __func__);
1476	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1477}
1478
1479static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1480
1481static ssize_t conn_fsm_show (struct device *dev,
1482			      struct device_attribute *attr, char *buf)
1483{
1484	struct netiucv_priv *priv = dev_get_drvdata(dev);
1485
1486	IUCV_DBF_TEXT(trace, 5, __func__);
1487	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1488}
1489
1490static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1491
1492static ssize_t maxmulti_show (struct device *dev,
1493			      struct device_attribute *attr, char *buf)
1494{
1495	struct netiucv_priv *priv = dev_get_drvdata(dev);
1496
1497	IUCV_DBF_TEXT(trace, 5, __func__);
1498	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1499}
1500
1501static ssize_t maxmulti_write (struct device *dev,
1502			       struct device_attribute *attr,
1503			       const char *buf, size_t count)
1504{
1505	struct netiucv_priv *priv = dev_get_drvdata(dev);
1506
1507	IUCV_DBF_TEXT(trace, 4, __func__);
1508	priv->conn->prof.maxmulti = 0;
1509	return count;
1510}
1511
1512static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1513
1514static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1515			   char *buf)
1516{
1517	struct netiucv_priv *priv = dev_get_drvdata(dev);
1518
1519	IUCV_DBF_TEXT(trace, 5, __func__);
1520	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1521}
1522
1523static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1524			    const char *buf, size_t count)
1525{
1526	struct netiucv_priv *priv = dev_get_drvdata(dev);
1527
1528	IUCV_DBF_TEXT(trace, 4, __func__);
1529	priv->conn->prof.maxcqueue = 0;
1530	return count;
1531}
1532
1533static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1534
1535static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1536			   char *buf)
1537{
1538	struct netiucv_priv *priv = dev_get_drvdata(dev);
1539
1540	IUCV_DBF_TEXT(trace, 5, __func__);
1541	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1542}
1543
1544static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1545			    const char *buf, size_t count)
1546{
1547	struct netiucv_priv *priv = dev_get_drvdata(dev);
1548
1549	IUCV_DBF_TEXT(trace, 4, __func__);
1550	priv->conn->prof.doios_single = 0;
1551	return count;
1552}
1553
1554static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1555
1556static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1557			   char *buf)
1558{
1559	struct netiucv_priv *priv = dev_get_drvdata(dev);
1560
1561	IUCV_DBF_TEXT(trace, 5, __func__);
1562	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1563}
1564
1565static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1566			    const char *buf, size_t count)
1567{
1568	struct netiucv_priv *priv = dev_get_drvdata(dev);
1569
1570	IUCV_DBF_TEXT(trace, 5, __func__);
1571	priv->conn->prof.doios_multi = 0;
1572	return count;
1573}
1574
static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);

static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.txlen = 0;
	return count;
}

static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);

static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}

static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_time = 0;
	return count;
}

static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);

static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}

static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);

static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 5, __func__);
	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}

static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	IUCV_DBF_TEXT(trace, 4, __func__);
	priv->conn->prof.tx_max_pending = 0;
	return count;
}

static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);

static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name  = "stats",
	.attrs = netiucv_stat_attrs,
};

static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
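
/*
 * Note (illustrative, exact sysfs path depends on the running system): the
 * statistics attributes above are grouped in a "stats" subdirectory of the
 * per-connection device, e.g. something like
 *   /sys/bus/iucv/devices/netiucv0/stats/tx_pending
 * Reading an attribute returns the current counter; writing any value resets
 * it to 0, as implemented by the *_write handlers above.
 */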

static int netiucv_register_device(struct net_device *ndev)
{
	struct netiucv_priv *priv = netdev_priv(ndev);
	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	int ret;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (!dev)
		return -ENOMEM;

	dev_set_name(dev, "net%s", ndev->name);
	dev->bus = &iucv_bus;
	dev->parent = iucv_root;
	dev->groups = netiucv_attr_groups;
	/*
	 * The release function could be called after the
	 * module has been unloaded. Its _only_ task is to
	 * free the struct. Therefore, we specify kfree()
	 * directly here. (Probably a little bit obfuscated,
	 * but legitimate.) An equivalent explicit callback
	 * is sketched after this function.
	 */
	dev->release = (void (*)(struct device *))kfree;
	dev->driver = &netiucv_driver;

	ret = device_register(dev);
	if (ret) {
		put_device(dev);
		return ret;
	}
	priv->dev = dev;
	dev_set_drvdata(dev, priv);
	return 0;
}
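
#if 0
/*
 * Illustrative sketch only, not wired into the driver: an explicit release
 * callback equivalent to the kfree() cast above. Spelling it out avoids
 * casting kfree() to an incompatible function pointer type. The name
 * netiucv_dev_release is hypothetical.
 */
static void netiucv_dev_release(struct device *dev)
{
	kfree(dev);
}
/* and in netiucv_register_device():  dev->release = netiucv_dev_release; */
#endif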

static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	device_unregister(dev);
}

/*
 * Allocate and initialize a new connection structure.
 * Add it to the list of netiucv connections.
 */
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
						      char *username,
						      char *userdata)
{
	struct iucv_connection *conn;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		goto out;
	skb_queue_head_init(&conn->collect_queue);
	skb_queue_head_init(&conn->commit_queue);
	spin_lock_init(&conn->collect_lock);
	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
	conn->netdev = dev;

	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->rx_buff)
		goto out_conn;
	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
	if (!conn->tx_buff)
		goto out_rx;
	conn->fsm = init_fsm("netiucvconn", conn_state_names,
			     conn_event_names, NR_CONN_STATES,
			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
			     GFP_KERNEL);
	if (!conn->fsm)
		goto out_tx;

	fsm_settimer(conn->fsm, &conn->timer);
	fsm_newstate(conn->fsm, CONN_STATE_INVALID);

	if (userdata)
		memcpy(conn->userdata, userdata, 17);
	if (username) {
		memcpy(conn->userid, username, 9);
		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
	}

	write_lock_bh(&iucv_connection_rwlock);
	list_add_tail(&conn->list, &iucv_connection_list);
	write_unlock_bh(&iucv_connection_rwlock);
	return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
}

/*
 * Release a connection structure and remove it from the
 * list of netiucv connections.
 */
static void netiucv_remove_connection(struct iucv_connection *conn)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	write_lock_bh(&iucv_connection_rwlock);
	list_del_init(&conn->list);
	write_unlock_bh(&iucv_connection_rwlock);
	fsm_deltimer(&conn->timer);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	kfree_fsm(conn->fsm);
	kfree_skb(conn->rx_buff);
	kfree_skb(conn->tx_buff);
}

/*
 * Release everything of a net device.
 */
static void netiucv_free_netdevice(struct net_device *dev)
{
	struct netiucv_priv *privptr;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (!dev)
		return;

	privptr = netdev_priv(dev);
	if (privptr) {
		if (privptr->conn)
			netiucv_remove_connection(privptr->conn);
		if (privptr->fsm)
			kfree_fsm(privptr->fsm);
		privptr->conn = NULL;
		privptr->fsm = NULL;
		/* privptr gets freed by free_netdev() */
	}
}

/*
 * Initialize a net device. (Called from kernel in alloc_netdev())
 */
static const struct net_device_ops netiucv_netdev_ops = {
	.ndo_open		= netiucv_open,
	.ndo_stop		= netiucv_close,
	.ndo_get_stats		= netiucv_stats,
	.ndo_start_xmit		= netiucv_tx,
};

static void netiucv_setup_netdevice(struct net_device *dev)
{
	dev->mtu                 = NETIUCV_MTU_DEFAULT;
	dev->min_mtu             = 576;
	dev->max_mtu             = NETIUCV_MTU_MAX;
	dev->needs_free_netdev   = true;
	dev->priv_destructor     = netiucv_free_netdevice;
	dev->hard_header_len     = NETIUCV_HDRLEN;
	dev->addr_len            = 0;
	dev->type                = ARPHRD_SLIP;
	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
	dev->flags               = IFF_POINTOPOINT | IFF_NOARP;
	dev->netdev_ops          = &netiucv_netdev_ops;
}

/*
 * Allocate and initialize everything of a net device.
 */
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
	struct netiucv_priv *privptr;
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
	if (!dev)
		return NULL;
	rtnl_lock();
	if (dev_alloc_name(dev, dev->name) < 0)
		goto out_netdev;

	privptr = netdev_priv(dev);
	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
	if (!privptr->fsm)
		goto out_netdev;

	privptr->conn = netiucv_new_connection(dev, username, userdata);
	if (!privptr->conn) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
		goto out_fsm;
	}
	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
	return dev;

out_fsm:
	kfree_fsm(privptr->fsm);
out_netdev:
	rtnl_unlock();
	free_netdev(dev);
	return NULL;
}

static ssize_t connection_store(struct device_driver *drv, const char *buf,
				size_t count)
{
	char username[9];
	char userdata[17];
	int rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		rtnl_unlock();
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	rtnl_unlock();
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
			    "successfully\n",
		netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}
static DRIVER_ATTR_WO(connection);
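
/*
 * Example usage (illustrative, sysfs path assumed): writing the z/VM user ID
 * of a peer to this driver attribute creates a new iucv%d interface, e.g.
 *
 *   echo VMUSER1 > /sys/bus/iucv/drivers/netiucv/connection
 *
 * connection_store() rejects user IDs for which a connection already exists.
 */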

static ssize_t remove_store(struct device_driver *drv, const char *buf,
			    size_t count)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;
	char name[IFNAMSIZ];
	const char *p;
	int i;

	IUCV_DBF_TEXT(trace, 3, __func__);

	if (count >= IFNAMSIZ)
		count = IFNAMSIZ - 1;

	for (i = 0, p = buf; i < count && *p; i++, p++) {
		if (*p == '\n' || *p == ' ')
			/* trailing lf, grr */
			break;
		name[i] = *p;
	}
	name[i] = '\0';

	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;
		if (strncmp(name, ndev->name, count))
			continue;
		read_unlock_bh(&iucv_connection_rwlock);
		if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
			dev_warn(dev, "The IUCV device is connected"
				" to %s and cannot be removed\n",
				priv->conn->userid);
			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
			return -EPERM;
		}
		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
		return count;
	}
	read_unlock_bh(&iucv_connection_rwlock);
	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
	return -EINVAL;
}
static DRIVER_ATTR_WO(remove);
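
/*
 * Example usage (illustrative, sysfs path assumed): an interface that is not
 * up can be removed by writing its name to this driver attribute, e.g.
 *
 *   echo iucv0 > /sys/bus/iucv/drivers/netiucv/remove
 *
 * remove_store() refuses to remove interfaces that are still IFF_UP or
 * IFF_RUNNING.
 */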

static struct attribute *netiucv_drv_attrs[] = {
	&driver_attr_connection.attr,
	&driver_attr_remove.attr,
	NULL,
};

static struct attribute_group netiucv_drv_attr_group = {
	.attrs = netiucv_drv_attrs,
};

static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};

static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}

static void __exit netiucv_exit(void)
{
	struct iucv_connection *cp;
	struct net_device *ndev;
	struct netiucv_priv *priv;
	struct device *dev;

	IUCV_DBF_TEXT(trace, 3, __func__);
	while (!list_empty(&iucv_connection_list)) {
		cp = list_entry(iucv_connection_list.next,
				struct iucv_connection, list);
		ndev = cp->netdev;
		priv = netdev_priv(ndev);
		dev = priv->dev;

		unregister_netdev(ndev);
		netiucv_unregister_device(dev);
	}

	driver_unregister(&netiucv_driver);
	iucv_unregister(&netiucv_handler, 1);
	iucv_unregister_dbf_views();

	pr_info("driver unloaded\n");
}

static int __init netiucv_init(void)
{
	int rc;

	rc = iucv_register_dbf_views();
	if (rc)
		goto out;
	rc = iucv_register(&netiucv_handler, 1);
	if (rc)
		goto out_dbf;
	IUCV_DBF_TEXT(trace, 3, __func__);
	netiucv_driver.groups = netiucv_drv_attr_groups;
	rc = driver_register(&netiucv_driver);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
		goto out_iucv;
	}

	netiucv_banner();
	return rc;

out_iucv:
	iucv_unregister(&netiucv_handler, 1);
out_dbf:
	iucv_unregister_dbf_views();
out:
	return rc;
}

module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");