Linux v5.9, drivers/s390/net/qeth_core_main.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *    Copyright IBM Corp. 2007, 2009
   4 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
   5 *		 Frank Pavlic <fpavlic@de.ibm.com>,
   6 *		 Thomas Spatzier <tspat@de.ibm.com>,
   7 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
   8 */
   9
  10#define KMSG_COMPONENT "qeth"
  11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  12
  13#include <linux/compat.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/string.h>
  17#include <linux/errno.h>
  18#include <linux/kernel.h>
  19#include <linux/log2.h>
  20#include <linux/ip.h>
  21#include <linux/tcp.h>
  22#include <linux/mii.h>
  23#include <linux/mm.h>
  24#include <linux/kthread.h>
  25#include <linux/slab.h>
  26#include <linux/if_vlan.h>
  27#include <linux/netdevice.h>
  28#include <linux/netdev_features.h>
  29#include <linux/rcutree.h>
  30#include <linux/skbuff.h>
  31#include <linux/vmalloc.h>
  32
  33#include <net/iucv/af_iucv.h>
  34#include <net/dsfield.h>
  35
  36#include <asm/ebcdic.h>
  37#include <asm/chpid.h>
  38#include <asm/io.h>
  39#include <asm/sysinfo.h>
  40#include <asm/diag.h>
  41#include <asm/cio.h>
  42#include <asm/ccwdev.h>
  43#include <asm/cpcmd.h>
  44
  45#include "qeth_core.h"
  46
  47struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
  48	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
  49	/*                   N  P  A    M  L  V                      H  */
  50	[QETH_DBF_SETUP] = {"qeth_setup",
  51				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
  52	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
  53			    &debug_sprintf_view, NULL},
  54	[QETH_DBF_CTRL]  = {"qeth_control",
  55		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
  56};
  57EXPORT_SYMBOL_GPL(qeth_dbf);
  58
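/*
 * Illustrative sketch, not part of the original file: the driver's setup
 * code (qeth_register_dbf_views(), later in the full source) consumes the
 * table above roughly as follows, using the s390 debug feature API from
 * <asm/debug.h>. The field names are those of struct qeth_dbf_info in
 * qeth_core.h: Name/Pages/Areas/Maxlen feed debug_register(), Level feeds
 * debug_set_level(), View feeds debug_register_view().
 */
static int __maybe_unused qeth_example_register_dbf_views(void)
{
	int x;

	for (x = 0; x < QETH_DBF_INFOS; x++) {
		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
						qeth_dbf[x].pages,
						qeth_dbf[x].areas,
						qeth_dbf[x].len);
		if (!qeth_dbf[x].id)
			return -ENOMEM;	/* the real code also tears down */
		debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
	}
	return 0;
}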
  59struct kmem_cache *qeth_core_header_cache;
  60EXPORT_SYMBOL_GPL(qeth_core_header_cache);
  61static struct kmem_cache *qeth_qdio_outbuf_cache;
  62
  63static struct device *qeth_core_root_dev;
  64static struct dentry *qeth_debugfs_root;
  65static struct lock_class_key qdio_out_skb_queue_key;
  66
  67static void qeth_issue_next_read_cb(struct qeth_card *card,
  68				    struct qeth_cmd_buffer *iob,
  69				    unsigned int data_length);
  70static int qeth_qdio_establish(struct qeth_card *);
  71static void qeth_free_qdio_queues(struct qeth_card *card);
  72static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
  73		struct qeth_qdio_out_buffer *buf,
  74		enum iucv_tx_notify notification);
  75static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
  76				 int budget);
  77static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
  78
  79static void qeth_close_dev_handler(struct work_struct *work)
  80{
  81	struct qeth_card *card;
  82
  83	card = container_of(work, struct qeth_card, close_dev_work);
  84	QETH_CARD_TEXT(card, 2, "cldevhdl");
  85	ccwgroup_set_offline(card->gdev);
  86}
  87
  88static const char *qeth_get_cardname(struct qeth_card *card)
  89{
  90	if (IS_VM_NIC(card)) {
  91		switch (card->info.type) {
  92		case QETH_CARD_TYPE_OSD:
  93			return " Virtual NIC QDIO";
  94		case QETH_CARD_TYPE_IQD:
  95			return " Virtual NIC Hiper";
  96		case QETH_CARD_TYPE_OSM:
  97			return " Virtual NIC QDIO - OSM";
  98		case QETH_CARD_TYPE_OSX:
  99			return " Virtual NIC QDIO - OSX";
 100		default:
 101			return " unknown";
 102		}
 103	} else {
 104		switch (card->info.type) {
 105		case QETH_CARD_TYPE_OSD:
 106			return " OSD Express";
 107		case QETH_CARD_TYPE_IQD:
 108			return " HiperSockets";
 109		case QETH_CARD_TYPE_OSN:
 110			return " OSN QDIO";
 111		case QETH_CARD_TYPE_OSM:
 112			return " OSM QDIO";
 113		case QETH_CARD_TYPE_OSX:
 114			return " OSX QDIO";
 115		default:
 116			return " unknown";
 117		}
 118	}
 119	return " n/a";
 120}
 121
 122/* max length to be returned: 14 */
 123const char *qeth_get_cardname_short(struct qeth_card *card)
 124{
 125	if (IS_VM_NIC(card)) {
 126		switch (card->info.type) {
 127		case QETH_CARD_TYPE_OSD:
 128			return "Virt.NIC QDIO";
 129		case QETH_CARD_TYPE_IQD:
 130			return "Virt.NIC Hiper";
 131		case QETH_CARD_TYPE_OSM:
 132			return "Virt.NIC OSM";
 133		case QETH_CARD_TYPE_OSX:
 134			return "Virt.NIC OSX";
 135		default:
 136			return "unknown";
 137		}
 138	} else {
 139		switch (card->info.type) {
 140		case QETH_CARD_TYPE_OSD:
 141			switch (card->info.link_type) {
 142			case QETH_LINK_TYPE_FAST_ETH:
 143				return "OSD_100";
 144			case QETH_LINK_TYPE_HSTR:
 145				return "HSTR";
 146			case QETH_LINK_TYPE_GBIT_ETH:
 147				return "OSD_1000";
 148			case QETH_LINK_TYPE_10GBIT_ETH:
 149				return "OSD_10GIG";
 150			case QETH_LINK_TYPE_25GBIT_ETH:
 151				return "OSD_25GIG";
 152			case QETH_LINK_TYPE_LANE_ETH100:
 153				return "OSD_FE_LANE";
 154			case QETH_LINK_TYPE_LANE_TR:
 155				return "OSD_TR_LANE";
 156			case QETH_LINK_TYPE_LANE_ETH1000:
 157				return "OSD_GbE_LANE";
 158			case QETH_LINK_TYPE_LANE:
 159				return "OSD_ATM_LANE";
 160			default:
 161				return "OSD_Express";
 162			}
 163		case QETH_CARD_TYPE_IQD:
 164			return "HiperSockets";
 165		case QETH_CARD_TYPE_OSN:
 166			return "OSN";
 167		case QETH_CARD_TYPE_OSM:
 168			return "OSM_1000";
 169		case QETH_CARD_TYPE_OSX:
 170			return "OSX_10GIG";
 171		default:
 172			return "unknown";
 173		}
 174	}
 175	return "n/a";
 176}
 177
 178void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
 179			 int clear_start_mask)
 180{
 181	unsigned long flags;
 182
 183	spin_lock_irqsave(&card->thread_mask_lock, flags);
 184	card->thread_allowed_mask = threads;
 185	if (clear_start_mask)
 186		card->thread_start_mask &= threads;
 187	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 188	wake_up(&card->wait_q);
 189}
 190EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
 191
 192int qeth_threads_running(struct qeth_card *card, unsigned long threads)
 193{
 194	unsigned long flags;
 195	int rc = 0;
 196
 197	spin_lock_irqsave(&card->thread_mask_lock, flags);
 198	rc = (card->thread_running_mask & threads);
 199	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 200	return rc;
 201}
 202EXPORT_SYMBOL_GPL(qeth_threads_running);
 203
 204void qeth_clear_working_pool_list(struct qeth_card *card)
 205{
 206	struct qeth_buffer_pool_entry *pool_entry, *tmp;
 207	struct qeth_qdio_q *queue = card->qdio.in_q;
 208	unsigned int i;
 209
 210	QETH_CARD_TEXT(card, 5, "clwrklst");
 211	list_for_each_entry_safe(pool_entry, tmp,
  212				 &card->qdio.in_buf_pool.entry_list, list) {
  213		list_del(&pool_entry->list);
 214	}
 215
 216	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
 217		queue->bufs[i].pool_entry = NULL;
 218}
 219EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
 220
 221static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
 222{
 223	unsigned int i;
 224
 225	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
 226		if (entry->elements[i])
 227			__free_page(entry->elements[i]);
 228	}
 229
 230	kfree(entry);
 231}
 232
 233static void qeth_free_buffer_pool(struct qeth_card *card)
 234{
 235	struct qeth_buffer_pool_entry *entry, *tmp;
 236
 237	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
 238				 init_list) {
 239		list_del(&entry->init_list);
 240		qeth_free_pool_entry(entry);
 241	}
 242}
 243
 244static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
 245{
 246	struct qeth_buffer_pool_entry *entry;
 247	unsigned int i;
 248
 249	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 250	if (!entry)
 251		return NULL;
 252
 253	for (i = 0; i < pages; i++) {
 254		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
 255
 256		if (!entry->elements[i]) {
 257			qeth_free_pool_entry(entry);
 258			return NULL;
 259		}
 260	}
 261
 262	return entry;
 263}
 264
 265static int qeth_alloc_buffer_pool(struct qeth_card *card)
 266{
 267	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
 268	unsigned int i;
 269
 270	QETH_CARD_TEXT(card, 5, "alocpool");
 271	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
 272		struct qeth_buffer_pool_entry *entry;
 273
 274		entry = qeth_alloc_pool_entry(buf_elements);
 275		if (!entry) {
 276			qeth_free_buffer_pool(card);
 277			return -ENOMEM;
 278		}
 279
 280		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
 281	}
 282	return 0;
 283}
 284
 285int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
 286{
 287	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
 288	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
 289	struct qeth_buffer_pool_entry *entry, *tmp;
 290	int delta = count - pool->buf_count;
 291	LIST_HEAD(entries);
 292
 293	QETH_CARD_TEXT(card, 2, "realcbp");
 294
 295	/* Defer until queue is allocated: */
 296	if (!card->qdio.in_q)
 297		goto out;
 298
 299	/* Remove entries from the pool: */
 300	while (delta < 0) {
 301		entry = list_first_entry(&pool->entry_list,
 302					 struct qeth_buffer_pool_entry,
 303					 init_list);
 304		list_del(&entry->init_list);
 305		qeth_free_pool_entry(entry);
 306
 307		delta++;
 308	}
 309
 310	/* Allocate additional entries: */
 311	while (delta > 0) {
 312		entry = qeth_alloc_pool_entry(buf_elements);
 313		if (!entry) {
 314			list_for_each_entry_safe(entry, tmp, &entries,
 315						 init_list) {
 316				list_del(&entry->init_list);
 317				qeth_free_pool_entry(entry);
 318			}
 319
 320			return -ENOMEM;
 321		}
 322
 323		list_add(&entry->init_list, &entries);
 324
 325		delta--;
 326	}
 327
 328	list_splice(&entries, &pool->entry_list);
 329
 330out:
 331	card->qdio.in_buf_pool.buf_count = count;
 332	pool->buf_count = count;
 333	return 0;
 334}
 335EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
 336
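/*
 * Illustrative sketch, not part of the original file: the pool is resized
 * at runtime, e.g. when the buffer_count sysfs attribute is written. A
 * minimal caller, assuming card->conf_mutex (initialized in
 * qeth_setup_card() below) serializes reconfiguration:
 */
static int __maybe_unused qeth_example_set_buffer_count(struct qeth_card *card,
							unsigned int count)
{
	int rc;

	mutex_lock(&card->conf_mutex);
	rc = qeth_resize_buffer_pool(card, count);
	mutex_unlock(&card->conf_mutex);
	return rc;
}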
 337static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
 338{
 339	if (!q)
 340		return;
 341
 342	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
 343	kfree(q);
 344}
 345
 346static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
 347{
 348	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
 349	int i;
 350
 351	if (!q)
 352		return NULL;
 353
 354	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
 355		kfree(q);
 356		return NULL;
 357	}
 358
 359	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
 360		q->bufs[i].buffer = q->qdio_bufs[i];
 361
 362	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
 363	return q;
 364}
 365
 366static int qeth_cq_init(struct qeth_card *card)
 367{
 368	int rc;
 369
 370	if (card->options.cq == QETH_CQ_ENABLED) {
 371		QETH_CARD_TEXT(card, 2, "cqinit");
 372		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
 373				   QDIO_MAX_BUFFERS_PER_Q);
 374		card->qdio.c_q->next_buf_to_init = 127;
 375		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
 376			     card->qdio.no_in_queues - 1, 0,
 377			     127);
 378		if (rc) {
 379			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
 380			goto out;
 381		}
 382	}
 383	rc = 0;
 384out:
 385	return rc;
 386}
 387
 388static int qeth_alloc_cq(struct qeth_card *card)
 389{
 390	int rc;
 391
 392	if (card->options.cq == QETH_CQ_ENABLED) {
 393		int i;
 394		struct qdio_outbuf_state *outbuf_states;
 395
 396		QETH_CARD_TEXT(card, 2, "cqon");
 397		card->qdio.c_q = qeth_alloc_qdio_queue();
 398		if (!card->qdio.c_q) {
 399			rc = -1;
 400			goto kmsg_out;
 401		}
 402		card->qdio.no_in_queues = 2;
 403		card->qdio.out_bufstates =
 404			kcalloc(card->qdio.no_out_queues *
 405					QDIO_MAX_BUFFERS_PER_Q,
 406				sizeof(struct qdio_outbuf_state),
 407				GFP_KERNEL);
 408		outbuf_states = card->qdio.out_bufstates;
 409		if (outbuf_states == NULL) {
 410			rc = -1;
 411			goto free_cq_out;
 412		}
 413		for (i = 0; i < card->qdio.no_out_queues; ++i) {
 414			card->qdio.out_qs[i]->bufstates = outbuf_states;
 415			outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
 416		}
 417	} else {
 418		QETH_CARD_TEXT(card, 2, "nocq");
 419		card->qdio.c_q = NULL;
 420		card->qdio.no_in_queues = 1;
 421	}
 422	QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
 423	rc = 0;
 424out:
 425	return rc;
 426free_cq_out:
 427	qeth_free_qdio_queue(card->qdio.c_q);
 428	card->qdio.c_q = NULL;
 429kmsg_out:
 430	dev_err(&card->gdev->dev, "Failed to create completion queue\n");
 431	goto out;
 432}
 433
 434static void qeth_free_cq(struct qeth_card *card)
 435{
 436	if (card->qdio.c_q) {
 437		--card->qdio.no_in_queues;
 438		qeth_free_qdio_queue(card->qdio.c_q);
 439		card->qdio.c_q = NULL;
 440	}
 441	kfree(card->qdio.out_bufstates);
 442	card->qdio.out_bufstates = NULL;
 443}
 444
 445static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
 446							int delayed)
 447{
 448	enum iucv_tx_notify n;
 449
 450	switch (sbalf15) {
 451	case 0:
 452		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
 453		break;
 454	case 4:
 455	case 16:
 456	case 17:
 457	case 18:
 458		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
 459			TX_NOTIFY_UNREACHABLE;
 460		break;
 461	default:
 462		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
 463			TX_NOTIFY_GENERALERROR;
 464		break;
 465	}
 466
 467	return n;
 468}
 469
 470static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
 471					 int forced_cleanup)
 472{
 473	if (q->card->options.cq != QETH_CQ_ENABLED)
 474		return;
 475
 476	if (q->bufs[bidx]->next_pending != NULL) {
 477		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
 478		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
 479
 480		while (c) {
 481			if (forced_cleanup ||
 482			    atomic_read(&c->state) ==
 483			      QETH_QDIO_BUF_HANDLED_DELAYED) {
 484				struct qeth_qdio_out_buffer *f = c;
 485				QETH_CARD_TEXT(f->q->card, 5, "fp");
 486				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
 487				/* release here to avoid interleaving between
 488				   outbound tasklet and inbound tasklet
 489				   regarding notifications and lifecycle */
 490				qeth_tx_complete_buf(c, forced_cleanup, 0);
 491
 492				c = f->next_pending;
 493				WARN_ON_ONCE(head->next_pending != f);
 494				head->next_pending = c;
 495				kmem_cache_free(qeth_qdio_outbuf_cache, f);
 496			} else {
 497				head = c;
 498				c = c->next_pending;
 499			}
 500
 501		}
 502	}
 503	if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
 504					QETH_QDIO_BUF_HANDLED_DELAYED)) {
 505		/* for recovery situations */
 506		qeth_init_qdio_out_buf(q, bidx);
 507		QETH_CARD_TEXT(q->card, 2, "clprecov");
 508	}
 509}
 510
 511
 512static void qeth_qdio_handle_aob(struct qeth_card *card,
 513				 unsigned long phys_aob_addr)
 514{
 515	struct qaob *aob;
 516	struct qeth_qdio_out_buffer *buffer;
 517	enum iucv_tx_notify notification;
 518	unsigned int i;
 519
 520	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
 521	QETH_CARD_TEXT(card, 5, "haob");
 522	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
 523	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
 524	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
 525
 526	if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
 527			   QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
 528		notification = TX_NOTIFY_OK;
 529	} else {
 530		WARN_ON_ONCE(atomic_read(&buffer->state) !=
 531							QETH_QDIO_BUF_PENDING);
 532		atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
 533		notification = TX_NOTIFY_DELAYED_OK;
 534	}
 535
 536	if (aob->aorc != 0)  {
 537		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
 538		notification = qeth_compute_cq_notification(aob->aorc, 1);
 539	}
 540	qeth_notify_skbs(buffer->q, buffer, notification);
 541
 542	/* Free dangling allocations. The attached skbs are handled by
 543	 * qeth_cleanup_handled_pending().
 544	 */
 545	for (i = 0;
 546	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
 547	     i++) {
 548		void *data = phys_to_virt(aob->sba[i]);
 549
 550		if (data && buffer->is_header[i])
 551			kmem_cache_free(qeth_core_header_cache, data);
 552	}
 553	atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 554
 555	qdio_release_aob(aob);
 556}
 557
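/*
 * Note added for clarity: together with qeth_cleanup_handled_pending()
 * above, this routine implements the TX buffer state machine for async
 * (CQ) completions:
 *
 *   PRIMED --(AOB seen first)-------> IN_CQ --> HANDLED_DELAYED
 *   PRIMED --(marked PENDING by the TX completion path elsewhere in the
 *             driver, AOB arrives later)--> IN_CQ --> HANDLED_DELAYED
 *
 * HANDLED_DELAYED buffers are reaped by qeth_cleanup_handled_pending() on
 * the outbound path, so that skb release never happens in CQ tasklet scope
 * (see the WARN_ON_ONCE in qeth_tx_complete_buf() below).
 */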
 558static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
 559			   void *data)
 560{
 561	ccw->cmd_code = cmd_code;
 562	ccw->flags = flags | CCW_FLAG_SLI;
 563	ccw->count = len;
 564	ccw->cda = (__u32) __pa(data);
 565}
 566
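/*
 * Illustrative sketch, not part of the original file: callers build their
 * channel program through this helper. CCW_FLAG_SLI is always or'ed in, so
 * a short transfer does not end the I/O with an incorrect-length check, and
 * cda receives the 31-bit physical address of the data buffer (hence the
 * GFP_DMA allocations elsewhere in this file). CCW_CMD_WRITE is assumed to
 * be the write channel command, mirroring CCW_CMD_READ in
 * __qeth_issue_next_read() below:
 */
static void __maybe_unused qeth_example_build_write_ccw(struct qeth_cmd_buffer *iob)
{
	struct ccw1 *ccw = __ccw_from_cmd(iob);

	qeth_setup_ccw(ccw, CCW_CMD_WRITE, 0, iob->length, iob->data);
}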
 567static int __qeth_issue_next_read(struct qeth_card *card)
 568{
 569	struct qeth_cmd_buffer *iob = card->read_cmd;
 570	struct qeth_channel *channel = iob->channel;
 571	struct ccw1 *ccw = __ccw_from_cmd(iob);
 572	int rc;
 573
 574	QETH_CARD_TEXT(card, 5, "issnxrd");
 575	if (channel->state != CH_STATE_UP)
 576		return -EIO;
 577
 578	memset(iob->data, 0, iob->length);
 579	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
 580	iob->callback = qeth_issue_next_read_cb;
 581	/* keep the cmd alive after completion: */
 582	qeth_get_cmd(iob);
 583
 584	QETH_CARD_TEXT(card, 6, "noirqpnd");
 585	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
 586	if (!rc) {
 587		channel->active_cmd = iob;
 588	} else {
 589		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
 590				 rc, CARD_DEVID(card));
 591		qeth_unlock_channel(card, channel);
 592		qeth_put_cmd(iob);
 593		card->read_or_write_problem = 1;
 594		qeth_schedule_recovery(card);
 595	}
 596	return rc;
 597}
 598
 599static int qeth_issue_next_read(struct qeth_card *card)
 600{
 601	int ret;
 602
 603	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
 604	ret = __qeth_issue_next_read(card);
 605	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
 606
 607	return ret;
 608}
 609
 610static void qeth_enqueue_cmd(struct qeth_card *card,
 611			     struct qeth_cmd_buffer *iob)
 612{
 613	spin_lock_irq(&card->lock);
 614	list_add_tail(&iob->list, &card->cmd_waiter_list);
 615	spin_unlock_irq(&card->lock);
 616}
 617
 618static void qeth_dequeue_cmd(struct qeth_card *card,
 619			     struct qeth_cmd_buffer *iob)
 620{
 621	spin_lock_irq(&card->lock);
 622	list_del(&iob->list);
 623	spin_unlock_irq(&card->lock);
 624}
 625
 626void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
 627{
 628	iob->rc = reason;
 629	complete(&iob->done);
 630}
 631EXPORT_SYMBOL_GPL(qeth_notify_cmd);
 632
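/*
 * Illustrative sketch, not part of the original file: how the waiter list
 * and qeth_notify_cmd() cooperate. The real send path
 * (qeth_send_control_data() in the full source) has roughly this shape,
 * with timeout handling and reference counting omitted:
 */
static int __maybe_unused qeth_example_send_and_wait(struct qeth_card *card,
						     struct qeth_cmd_buffer *iob)
{
	qeth_enqueue_cmd(card, iob);	/* now visible to the reply matcher */

	/* ... start the channel I/O that carries iob->data ... */

	wait_for_completion(&iob->done);	/* completed by qeth_notify_cmd() */
	qeth_dequeue_cmd(card, iob);
	return iob->rc;
}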
 633static void qeth_flush_local_addrs4(struct qeth_card *card)
 634{
 635	struct qeth_local_addr *addr;
 636	struct hlist_node *tmp;
 637	unsigned int i;
 638
 639	spin_lock_irq(&card->local_addrs4_lock);
 640	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
 641		hash_del_rcu(&addr->hnode);
 642		kfree_rcu(addr, rcu);
 643	}
 644	spin_unlock_irq(&card->local_addrs4_lock);
 645}
 646
 647static void qeth_flush_local_addrs6(struct qeth_card *card)
 648{
 649	struct qeth_local_addr *addr;
 650	struct hlist_node *tmp;
 651	unsigned int i;
 652
 653	spin_lock_irq(&card->local_addrs6_lock);
 654	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
 655		hash_del_rcu(&addr->hnode);
 656		kfree_rcu(addr, rcu);
 657	}
 658	spin_unlock_irq(&card->local_addrs6_lock);
 659}
 660
 661void qeth_flush_local_addrs(struct qeth_card *card)
 662{
 663	qeth_flush_local_addrs4(card);
 664	qeth_flush_local_addrs6(card);
 665}
 666EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
 667
 668static void qeth_add_local_addrs4(struct qeth_card *card,
 669				  struct qeth_ipacmd_local_addrs4 *cmd)
 670{
 671	unsigned int i;
 672
 673	if (cmd->addr_length !=
 674	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
 675		dev_err_ratelimited(&card->gdev->dev,
 676				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
 677				    cmd->addr_length);
 678		return;
 679	}
 680
 681	spin_lock(&card->local_addrs4_lock);
 682	for (i = 0; i < cmd->count; i++) {
 683		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
 684		struct qeth_local_addr *addr;
 685		bool duplicate = false;
 686
 687		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
 688			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
 689				duplicate = true;
 690				break;
 691			}
 692		}
 693
 694		if (duplicate)
 695			continue;
 696
 697		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
 698		if (!addr) {
 699			dev_err(&card->gdev->dev,
 700				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
 701				&cmd->addrs[i].addr);
 702			continue;
 703		}
 704
 705		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
 706		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
 707	}
 708	spin_unlock(&card->local_addrs4_lock);
 709}
 710
 711static void qeth_add_local_addrs6(struct qeth_card *card,
 712				  struct qeth_ipacmd_local_addrs6 *cmd)
 713{
 714	unsigned int i;
 715
 716	if (cmd->addr_length !=
 717	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
 718		dev_err_ratelimited(&card->gdev->dev,
 719				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
 720				    cmd->addr_length);
 721		return;
 722	}
 723
 724	spin_lock(&card->local_addrs6_lock);
 725	for (i = 0; i < cmd->count; i++) {
 726		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
 727		struct qeth_local_addr *addr;
 728		bool duplicate = false;
 729
 730		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
 731			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
 732				duplicate = true;
 733				break;
 734			}
 735		}
 736
 737		if (duplicate)
 738			continue;
 739
 740		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
 741		if (!addr) {
 742			dev_err(&card->gdev->dev,
 743				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
 744				&cmd->addrs[i].addr);
 745			continue;
 746		}
 747
 748		addr->addr = cmd->addrs[i].addr;
 749		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
 750	}
 751	spin_unlock(&card->local_addrs6_lock);
 752}
 753
 754static void qeth_del_local_addrs4(struct qeth_card *card,
 755				  struct qeth_ipacmd_local_addrs4 *cmd)
 756{
 757	unsigned int i;
 758
 759	if (cmd->addr_length !=
 760	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
 761		dev_err_ratelimited(&card->gdev->dev,
 762				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
 763				    cmd->addr_length);
 764		return;
 765	}
 766
 767	spin_lock(&card->local_addrs4_lock);
 768	for (i = 0; i < cmd->count; i++) {
 769		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
 770		unsigned int key = ipv4_addr_hash(addr->addr);
 771		struct qeth_local_addr *tmp;
 772
 773		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
 774			if (tmp->addr.s6_addr32[3] == addr->addr) {
 775				hash_del_rcu(&tmp->hnode);
 776				kfree_rcu(tmp, rcu);
 777				break;
 778			}
 779		}
 780	}
 781	spin_unlock(&card->local_addrs4_lock);
 782}
 783
 784static void qeth_del_local_addrs6(struct qeth_card *card,
 785				  struct qeth_ipacmd_local_addrs6 *cmd)
 786{
 787	unsigned int i;
 788
 789	if (cmd->addr_length !=
 790	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
 791		dev_err_ratelimited(&card->gdev->dev,
 792				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
 793				    cmd->addr_length);
 794		return;
 795	}
 796
 797	spin_lock(&card->local_addrs6_lock);
 798	for (i = 0; i < cmd->count; i++) {
 799		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
 800		u32 key = ipv6_addr_hash(&addr->addr);
 801		struct qeth_local_addr *tmp;
 802
 803		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
 804			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
 805				hash_del_rcu(&tmp->hnode);
 806				kfree_rcu(tmp, rcu);
 807				break;
 808			}
 809		}
 810	}
 811	spin_unlock(&card->local_addrs6_lock);
 812}
 813
 814static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
 815				      struct sk_buff *skb)
 816{
 817	struct qeth_local_addr *tmp;
 818	bool is_local = false;
 819	unsigned int key;
 820	__be32 next_hop;
 821
 822	if (hash_empty(card->local_addrs4))
 823		return false;
 824
 825	rcu_read_lock();
 826	next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4));
 827	key = ipv4_addr_hash(next_hop);
 828
 829	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
 830		if (tmp->addr.s6_addr32[3] == next_hop) {
 831			is_local = true;
 832			break;
 833		}
 834	}
 835	rcu_read_unlock();
 836
 837	return is_local;
 838}
 839
 840static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
 841				      struct sk_buff *skb)
 842{
 843	struct qeth_local_addr *tmp;
 844	struct in6_addr *next_hop;
 845	bool is_local = false;
 846	u32 key;
 847
 848	if (hash_empty(card->local_addrs6))
 849		return false;
 850
 851	rcu_read_lock();
 852	next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6));
 853	key = ipv6_addr_hash(next_hop);
 854
 855	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
 856		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
 857			is_local = true;
 858			break;
 859		}
 860	}
 861	rcu_read_unlock();
 862
 863	return is_local;
 864}
 865
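/*
 * Note added for clarity: the local-address cache above follows one locking
 * rule throughout. Writers (the add/del/flush helpers) serialize on
 * local_addrs4_lock/local_addrs6_lock and publish with hash_add_rcu() or
 * hash_del_rcu() + kfree_rcu(), while the two next-hop predicates on the TX
 * fast path read lock-free under rcu_read_lock(). Per packet this costs one
 * hash lookup and no lock contention.
 */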
 866static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
 867{
 868	struct qeth_card *card = m->private;
 869	struct qeth_local_addr *tmp;
 870	unsigned int i;
 871
 872	rcu_read_lock();
 873	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
 874		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
 875	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
 876		seq_printf(m, "%pI6c\n", &tmp->addr);
 877	rcu_read_unlock();
 878
 879	return 0;
 880}
 881
 882DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
 883
 884static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
 885		struct qeth_card *card)
 886{
 887	const char *ipa_name;
 888	int com = cmd->hdr.command;
 889	ipa_name = qeth_get_ipa_cmd_name(com);
 890
 891	if (rc)
 892		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
 893				 ipa_name, com, CARD_DEVID(card), rc,
 894				 qeth_get_ipa_msg(rc));
 895	else
 896		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
 897				 ipa_name, com, CARD_DEVID(card));
 898}
 899
 900static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
 901						struct qeth_ipa_cmd *cmd)
 902{
 903	QETH_CARD_TEXT(card, 5, "chkipad");
 904
 905	if (IS_IPA_REPLY(cmd)) {
 906		if (cmd->hdr.command != IPA_CMD_SETCCID &&
 907		    cmd->hdr.command != IPA_CMD_DELCCID &&
 908		    cmd->hdr.command != IPA_CMD_MODCCID &&
 909		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
 910			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
 911		return cmd;
 912	}
 913
 914	/* handle unsolicited event: */
 915	switch (cmd->hdr.command) {
 916	case IPA_CMD_STOPLAN:
 917		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
 918			dev_err(&card->gdev->dev,
 919				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
 920				QETH_CARD_IFNAME(card));
 921			schedule_work(&card->close_dev_work);
 922		} else {
 923			dev_warn(&card->gdev->dev,
 924				 "The link for interface %s on CHPID 0x%X failed\n",
 925				 QETH_CARD_IFNAME(card), card->info.chpid);
 926			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
 927			netif_carrier_off(card->dev);
 928		}
 929		return NULL;
 930	case IPA_CMD_STARTLAN:
 931		dev_info(&card->gdev->dev,
 932			 "The link for %s on CHPID 0x%X has been restored\n",
 933			 QETH_CARD_IFNAME(card), card->info.chpid);
 934		if (card->info.hwtrap)
 935			card->info.hwtrap = 2;
 936		qeth_schedule_recovery(card);
 937		return NULL;
 938	case IPA_CMD_SETBRIDGEPORT_IQD:
 939	case IPA_CMD_SETBRIDGEPORT_OSA:
 940	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
 941		if (card->discipline->control_event_handler(card, cmd))
 942			return cmd;
 943		return NULL;
 944	case IPA_CMD_MODCCID:
 945		return cmd;
 946	case IPA_CMD_REGISTER_LOCAL_ADDR:
 947		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
 948			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
 949		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
 950			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
 951
 952		QETH_CARD_TEXT(card, 3, "irla");
 953		return NULL;
 954	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
 955		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
 956			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
 957		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
 958			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
 959
 960		QETH_CARD_TEXT(card, 3, "urla");
 961		return NULL;
 962	default:
 963		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
 964		return cmd;
 965	}
 966}
 967
 968void qeth_clear_ipacmd_list(struct qeth_card *card)
 969{
 970	struct qeth_cmd_buffer *iob;
 971	unsigned long flags;
 972
 973	QETH_CARD_TEXT(card, 4, "clipalst");
 974
 975	spin_lock_irqsave(&card->lock, flags);
 976	list_for_each_entry(iob, &card->cmd_waiter_list, list)
 977		qeth_notify_cmd(iob, -ECANCELED);
 978	spin_unlock_irqrestore(&card->lock, flags);
 979}
 980EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
 981
 982static int qeth_check_idx_response(struct qeth_card *card,
 983	unsigned char *buffer)
 984{
 985	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
 986	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
 987		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
 988				 buffer[4]);
 989		QETH_CARD_TEXT(card, 2, "ckidxres");
 990		QETH_CARD_TEXT(card, 2, " idxterm");
 991		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
 992		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
 993		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
 994			dev_err(&card->gdev->dev,
 995				"The device does not support the configured transport mode\n");
 996			return -EPROTONOSUPPORT;
 997		}
 998		return -EIO;
 999	}
1000	return 0;
1001}
1002
1003void qeth_put_cmd(struct qeth_cmd_buffer *iob)
1004{
1005	if (refcount_dec_and_test(&iob->ref_count)) {
1006		kfree(iob->data);
1007		kfree(iob);
1008	}
1009}
1010EXPORT_SYMBOL_GPL(qeth_put_cmd);
1011
1012static void qeth_release_buffer_cb(struct qeth_card *card,
1013				   struct qeth_cmd_buffer *iob,
1014				   unsigned int data_length)
1015{
1016	qeth_put_cmd(iob);
1017}
1018
1019static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
1020{
1021	qeth_notify_cmd(iob, rc);
1022	qeth_put_cmd(iob);
1023}
1024
1025struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
1026				       unsigned int length, unsigned int ccws,
1027				       long timeout)
1028{
1029	struct qeth_cmd_buffer *iob;
1030
1031	if (length > QETH_BUFSIZE)
1032		return NULL;
1033
1034	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
1035	if (!iob)
1036		return NULL;
1037
1038	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
1039			    GFP_KERNEL | GFP_DMA);
1040	if (!iob->data) {
1041		kfree(iob);
1042		return NULL;
1043	}
1044
1045	init_completion(&iob->done);
1046	spin_lock_init(&iob->lock);
1047	INIT_LIST_HEAD(&iob->list);
1048	refcount_set(&iob->ref_count, 1);
1049	iob->channel = channel;
1050	iob->timeout = timeout;
1051	iob->length = length;
1052	return iob;
1053}
1054EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
1055
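/*
 * Illustrative sketch, not part of the original file: allocating a one-CCW
 * command buffer on the write channel. QETH_TIMEOUT is the driver's usual
 * control-I/O timeout (also used by qeth_clear_channel() below); iob->data
 * is GFP_DMA because qeth_setup_ccw() stores its 31-bit physical address
 * in ccw->cda.
 */
static __maybe_unused struct qeth_cmd_buffer *
qeth_example_alloc_write_cmd(struct qeth_card *card, unsigned int data_length)
{
	return qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
}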
1056static void qeth_issue_next_read_cb(struct qeth_card *card,
1057				    struct qeth_cmd_buffer *iob,
1058				    unsigned int data_length)
1059{
1060	struct qeth_cmd_buffer *request = NULL;
1061	struct qeth_ipa_cmd *cmd = NULL;
1062	struct qeth_reply *reply = NULL;
1063	struct qeth_cmd_buffer *tmp;
1064	unsigned long flags;
1065	int rc = 0;
1066
1067	QETH_CARD_TEXT(card, 4, "sndctlcb");
1068	rc = qeth_check_idx_response(card, iob->data);
1069	switch (rc) {
1070	case 0:
1071		break;
1072	case -EIO:
1073		qeth_schedule_recovery(card);
1074		fallthrough;
1075	default:
1076		qeth_clear_ipacmd_list(card);
1077		goto err_idx;
1078	}
1079
1080	cmd = __ipa_reply(iob);
1081	if (cmd) {
1082		cmd = qeth_check_ipa_data(card, cmd);
1083		if (!cmd)
1084			goto out;
1085		if (IS_OSN(card) && card->osn_info.assist_cb &&
1086		    cmd->hdr.command != IPA_CMD_STARTLAN) {
1087			card->osn_info.assist_cb(card->dev, cmd);
1088			goto out;
1089		}
1090	}
1091
1092	/* match against pending cmd requests */
1093	spin_lock_irqsave(&card->lock, flags);
1094	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
1095		if (tmp->match && tmp->match(tmp, iob)) {
1096			request = tmp;
1097			/* take the object outside the lock */
1098			qeth_get_cmd(request);
1099			break;
1100		}
1101	}
1102	spin_unlock_irqrestore(&card->lock, flags);
1103
1104	if (!request)
1105		goto out;
1106
1107	reply = &request->reply;
1108	if (!reply->callback) {
1109		rc = 0;
1110		goto no_callback;
1111	}
1112
1113	spin_lock_irqsave(&request->lock, flags);
1114	if (request->rc)
1115		/* Bail out when the requestor has already left: */
1116		rc = request->rc;
1117	else
1118		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
1119							(unsigned long)iob);
1120	spin_unlock_irqrestore(&request->lock, flags);
1121
1122no_callback:
1123	if (rc <= 0)
1124		qeth_notify_cmd(request, rc);
1125	qeth_put_cmd(request);
1126out:
1127	memcpy(&card->seqno.pdu_hdr_ack,
1128		QETH_PDU_HEADER_SEQ_NO(iob->data),
1129		QETH_SEQ_NO_LENGTH);
1130	__qeth_issue_next_read(card);
1131err_idx:
1132	qeth_put_cmd(iob);
1133}
1134
1135static int qeth_set_thread_start_bit(struct qeth_card *card,
1136		unsigned long thread)
1137{
1138	unsigned long flags;
1139	int rc = 0;
1140
1141	spin_lock_irqsave(&card->thread_mask_lock, flags);
1142	if (!(card->thread_allowed_mask & thread))
1143		rc = -EPERM;
1144	else if (card->thread_start_mask & thread)
1145		rc = -EBUSY;
1146	else
1147		card->thread_start_mask |= thread;
1148	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1149
1150	return rc;
1151}
1152
1153static void qeth_clear_thread_start_bit(struct qeth_card *card,
1154					unsigned long thread)
1155{
1156	unsigned long flags;
1157
1158	spin_lock_irqsave(&card->thread_mask_lock, flags);
1159	card->thread_start_mask &= ~thread;
1160	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1161	wake_up(&card->wait_q);
1162}
1163
1164static void qeth_clear_thread_running_bit(struct qeth_card *card,
1165					  unsigned long thread)
1166{
1167	unsigned long flags;
1168
1169	spin_lock_irqsave(&card->thread_mask_lock, flags);
1170	card->thread_running_mask &= ~thread;
1171	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1172	wake_up_all(&card->wait_q);
1173}
1174
1175static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1176{
1177	unsigned long flags;
1178	int rc = 0;
1179
1180	spin_lock_irqsave(&card->thread_mask_lock, flags);
1181	if (card->thread_start_mask & thread) {
1182		if ((card->thread_allowed_mask & thread) &&
1183		    !(card->thread_running_mask & thread)) {
1184			rc = 1;
1185			card->thread_start_mask &= ~thread;
1186			card->thread_running_mask |= thread;
1187		} else
1188			rc = -EPERM;
1189	}
1190	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1191	return rc;
1192}
1193
1194static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1195{
1196	int rc = 0;
1197
1198	wait_event(card->wait_q,
1199		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
1200	return rc;
1201}
1202
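/*
 * Illustrative sketch, not part of the original file: the shape of a worker
 * thread built on these helpers. qeth_do_reset() (declared further below,
 * defined in the full source) follows this pattern for QETH_RECOVER_THREAD:
 */
static int __maybe_unused qeth_example_thread(void *data)
{
	struct qeth_card *card = data;

	/* returns 1 once the start bit is claimed, 0 if it was never set: */
	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
		return 0;

	/* ... do the actual work for this thread ... */

	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
	return 0;
}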
1203int qeth_schedule_recovery(struct qeth_card *card)
1204{
1205	int rc;
1206
1207	QETH_CARD_TEXT(card, 2, "startrec");
1208
1209	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
1210	if (!rc)
1211		schedule_work(&card->kernel_thread_starter);
1212
1213	return rc;
1214}
1215
1216static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
1217			    struct irb *irb)
1218{
1219	int dstat, cstat;
1220	char *sense;
1221
1222	sense = (char *) irb->ecw;
1223	cstat = irb->scsw.cmd.cstat;
1224	dstat = irb->scsw.cmd.dstat;
1225
1226	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1227		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
1228		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
1229		QETH_CARD_TEXT(card, 2, "CGENCHK");
1230		dev_warn(&cdev->dev, "The qeth device driver "
1231			"failed to recover an error on the device\n");
1232		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
1233				 CCW_DEVID(cdev), dstat, cstat);
1234		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
1235				16, 1, irb, 64, 1);
1236		return -EIO;
1237	}
1238
1239	if (dstat & DEV_STAT_UNIT_CHECK) {
1240		if (sense[SENSE_RESETTING_EVENT_BYTE] &
1241		    SENSE_RESETTING_EVENT_FLAG) {
1242			QETH_CARD_TEXT(card, 2, "REVIND");
1243			return -EIO;
1244		}
1245		if (sense[SENSE_COMMAND_REJECT_BYTE] &
1246		    SENSE_COMMAND_REJECT_FLAG) {
1247			QETH_CARD_TEXT(card, 2, "CMDREJi");
1248			return -EIO;
1249		}
1250		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
1251			QETH_CARD_TEXT(card, 2, "AFFE");
1252			return -EIO;
1253		}
1254		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
1255			QETH_CARD_TEXT(card, 2, "ZEROSEN");
1256			return 0;
1257		}
1258		QETH_CARD_TEXT(card, 2, "DGENCHK");
 1259		return -EIO;
1260	}
1261	return 0;
1262}
1263
1264static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
1265				struct irb *irb)
1266{
1267	if (!IS_ERR(irb))
1268		return 0;
1269
1270	switch (PTR_ERR(irb)) {
1271	case -EIO:
1272		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
1273				 CCW_DEVID(cdev));
1274		QETH_CARD_TEXT(card, 2, "ckirberr");
1275		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
1276		return -EIO;
1277	case -ETIMEDOUT:
1278		dev_warn(&cdev->dev, "A hardware operation timed out"
1279			" on the device\n");
1280		QETH_CARD_TEXT(card, 2, "ckirberr");
1281		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
1282		return -ETIMEDOUT;
1283	default:
1284		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1285				 PTR_ERR(irb), CCW_DEVID(cdev));
1286		QETH_CARD_TEXT(card, 2, "ckirberr");
1287		QETH_CARD_TEXT(card, 2, "  rc???");
1288		return PTR_ERR(irb);
1289	}
1290}
1291
1292static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1293		struct irb *irb)
1294{
1295	int rc;
1296	int cstat, dstat;
1297	struct qeth_cmd_buffer *iob = NULL;
1298	struct ccwgroup_device *gdev;
1299	struct qeth_channel *channel;
1300	struct qeth_card *card;
1301
1302	/* while we hold the ccwdev lock, this stays valid: */
1303	gdev = dev_get_drvdata(&cdev->dev);
1304	card = dev_get_drvdata(&gdev->dev);
1305
1306	QETH_CARD_TEXT(card, 5, "irq");
1307
1308	if (card->read.ccwdev == cdev) {
1309		channel = &card->read;
1310		QETH_CARD_TEXT(card, 5, "read");
1311	} else if (card->write.ccwdev == cdev) {
1312		channel = &card->write;
1313		QETH_CARD_TEXT(card, 5, "write");
1314	} else {
1315		channel = &card->data;
1316		QETH_CARD_TEXT(card, 5, "data");
1317	}
1318
1319	if (intparm == 0) {
1320		QETH_CARD_TEXT(card, 5, "irqunsol");
1321	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
1322		QETH_CARD_TEXT(card, 5, "irqunexp");
1323
1324		dev_err(&cdev->dev,
1325			"Received IRQ with intparm %lx, expected %px\n",
1326			intparm, channel->active_cmd);
1327		if (channel->active_cmd)
1328			qeth_cancel_cmd(channel->active_cmd, -EIO);
1329	} else {
1330		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
1331	}
1332
1333	channel->active_cmd = NULL;
1334	qeth_unlock_channel(card, channel);
1335
1336	rc = qeth_check_irb_error(card, cdev, irb);
1337	if (rc) {
1338		/* IO was terminated, free its resources. */
1339		if (iob)
1340			qeth_cancel_cmd(iob, rc);
1341		return;
1342	}
1343
1344	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
1345		channel->state = CH_STATE_STOPPED;
1346		wake_up(&card->wait_q);
1347	}
1348
1349	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1350		channel->state = CH_STATE_HALTED;
1351		wake_up(&card->wait_q);
1352	}
1353
1354	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
1355					  SCSW_FCTL_HALT_FUNC))) {
1356		qeth_cancel_cmd(iob, -ECANCELED);
1357		iob = NULL;
1358	}
1359
1360	cstat = irb->scsw.cmd.cstat;
1361	dstat = irb->scsw.cmd.dstat;
1362
1363	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1364	    (dstat & DEV_STAT_UNIT_CHECK) ||
1365	    (cstat)) {
1366		if (irb->esw.esw0.erw.cons) {
1367			dev_warn(&channel->ccwdev->dev,
1368				"The qeth device driver failed to recover "
1369				"an error on the device\n");
1370			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1371					 CCW_DEVID(channel->ccwdev), cstat,
1372					 dstat);
1373			print_hex_dump(KERN_WARNING, "qeth: irb ",
1374				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1375			print_hex_dump(KERN_WARNING, "qeth: sense data ",
1376				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1377		}
1378
1379		rc = qeth_get_problem(card, cdev, irb);
1380		if (rc) {
1381			card->read_or_write_problem = 1;
1382			if (iob)
1383				qeth_cancel_cmd(iob, rc);
1384			qeth_clear_ipacmd_list(card);
1385			qeth_schedule_recovery(card);
1386			return;
1387		}
1388	}
1389
1390	if (iob) {
1391		/* sanity check: */
1392		if (irb->scsw.cmd.count > iob->length) {
1393			qeth_cancel_cmd(iob, -EIO);
1394			return;
1395		}
1396		if (iob->callback)
1397			iob->callback(card, iob,
1398				      iob->length - irb->scsw.cmd.count);
1399	}
1400}
1401
1402static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1403		struct qeth_qdio_out_buffer *buf,
1404		enum iucv_tx_notify notification)
1405{
1406	struct sk_buff *skb;
1407
1408	skb_queue_walk(&buf->skb_list, skb) {
1409		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1410		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1411		if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
1412			iucv_sk(skb->sk)->sk_txnotify(skb, notification);
1413	}
1414}
1415
1416static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
1417				 int budget)
1418{
1419	struct qeth_qdio_out_q *queue = buf->q;
1420	struct sk_buff *skb;
1421
1422	/* release may never happen from within CQ tasklet scope */
1423	WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1424
1425	if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1426		qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
1427
1428	/* Empty buffer? */
1429	if (buf->next_element_to_fill == 0)
1430		return;
1431
1432	QETH_TXQ_STAT_INC(queue, bufs);
1433	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
1434	if (error) {
1435		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
1436	} else {
1437		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
1438		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
1439	}
1440
1441	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
1442		unsigned int bytes = qdisc_pkt_len(skb);
1443		bool is_tso = skb_is_gso(skb);
1444		unsigned int packets;
1445
1446		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
1447		if (!error) {
1448			if (skb->ip_summed == CHECKSUM_PARTIAL)
1449				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
1450			if (skb_is_nonlinear(skb))
1451				QETH_TXQ_STAT_INC(queue, skbs_sg);
1452			if (is_tso) {
1453				QETH_TXQ_STAT_INC(queue, skbs_tso);
1454				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
1455			}
1456		}
1457
1458		napi_consume_skb(skb, budget);
1459	}
1460}
1461
1462static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1463				     struct qeth_qdio_out_buffer *buf,
1464				     bool error, int budget)
1465{
1466	int i;
1467
1468	/* is PCI flag set on buffer? */
1469	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
1470		atomic_dec(&queue->set_pci_flags_count);
1471
1472	qeth_tx_complete_buf(buf, error, budget);
1473
1474	for (i = 0; i < queue->max_elements; ++i) {
1475		void *data = phys_to_virt(buf->buffer->element[i].addr);
1476
1477		if (data && buf->is_header[i])
1478			kmem_cache_free(qeth_core_header_cache, data);
1479		buf->is_header[i] = 0;
1480	}
1481
1482	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
1483	buf->next_element_to_fill = 0;
1484	buf->frames = 0;
1485	buf->bytes = 0;
1486	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1487}
1488
1489static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
1490{
1491	int j;
1492
1493	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1494		if (!q->bufs[j])
1495			continue;
1496		qeth_cleanup_handled_pending(q, j, 1);
1497		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
1498		if (free) {
1499			kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
1500			q->bufs[j] = NULL;
1501		}
1502	}
1503}
1504
1505void qeth_drain_output_queues(struct qeth_card *card)
1506{
1507	int i;
1508
1509	QETH_CARD_TEXT(card, 2, "clearqdbf");
1510	/* clear outbound buffers to free skbs */
1511	for (i = 0; i < card->qdio.no_out_queues; ++i) {
1512		if (card->qdio.out_qs[i])
1513			qeth_drain_output_queue(card->qdio.out_qs[i], false);
1514	}
1515}
1516EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
1517
1518static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1519{
1520	unsigned int max = single ? 1 : card->dev->num_tx_queues;
1521	unsigned int count;
1522	int rc;
1523
1524	count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
1525
1526	rtnl_lock();
1527	rc = netif_set_real_num_tx_queues(card->dev, count);
1528	rtnl_unlock();
1529
1530	if (rc)
1531		return rc;
1532
1533	if (card->qdio.no_out_queues == max)
1534		return 0;
1535
1536	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1537		qeth_free_qdio_queues(card);
1538
1539	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
1540		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1541
1542	card->qdio.no_out_queues = max;
1543	return 0;
1544}
1545
1546static int qeth_update_from_chp_desc(struct qeth_card *card)
1547{
1548	struct ccw_device *ccwdev;
1549	struct channel_path_desc_fmt0 *chp_dsc;
1550	int rc = 0;
1551
1552	QETH_CARD_TEXT(card, 2, "chp_desc");
1553
1554	ccwdev = card->data.ccwdev;
1555	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1556	if (!chp_dsc)
1557		return -ENOMEM;
1558
1559	card->info.func_level = 0x4100 + chp_dsc->desc;
1560
1561	if (IS_OSD(card) || IS_OSX(card))
 1562		/* CHPP field bit 6 (mask 0x02; IBM bits count from the MSB) == 1 -> single queue */
1563		rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1564
1565	kfree(chp_dsc);
1566	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
1567	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1568	return rc;
1569}
1570
1571static void qeth_init_qdio_info(struct qeth_card *card)
1572{
1573	QETH_CARD_TEXT(card, 4, "intqdinf");
1574	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1575	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1576	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1577
1578	/* inbound */
1579	card->qdio.no_in_queues = 1;
1580	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1581	if (IS_IQD(card))
1582		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1583	else
1584		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1585	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1586	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1587	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1588}
1589
1590static void qeth_set_initial_options(struct qeth_card *card)
1591{
1592	card->options.route4.type = NO_ROUTER;
1593	card->options.route6.type = NO_ROUTER;
1594	card->options.isolation = ISOLATION_MODE_NONE;
1595	card->options.cq = QETH_CQ_DISABLED;
1596	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1597}
1598
1599static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1600{
1601	unsigned long flags;
1602	int rc = 0;
1603
1604	spin_lock_irqsave(&card->thread_mask_lock, flags);
1605	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
1606			(u8) card->thread_start_mask,
1607			(u8) card->thread_allowed_mask,
1608			(u8) card->thread_running_mask);
1609	rc = (card->thread_start_mask & thread);
1610	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1611	return rc;
1612}
1613
1614static int qeth_do_reset(void *data);
1615static void qeth_start_kernel_thread(struct work_struct *work)
1616{
1617	struct task_struct *ts;
1618	struct qeth_card *card = container_of(work, struct qeth_card,
1619					kernel_thread_starter);
 1620	QETH_CARD_TEXT(card, 2, "strthrd");
1621
1622	if (card->read.state != CH_STATE_UP &&
1623	    card->write.state != CH_STATE_UP)
1624		return;
1625	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1626		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
1627		if (IS_ERR(ts)) {
1628			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1629			qeth_clear_thread_running_bit(card,
1630				QETH_RECOVER_THREAD);
1631		}
1632	}
1633}
1634
1635static void qeth_buffer_reclaim_work(struct work_struct *);
1636static void qeth_setup_card(struct qeth_card *card)
1637{
1638	QETH_CARD_TEXT(card, 2, "setupcrd");
1639
1640	card->info.type = CARD_RDEV(card)->id.driver_info;
1641	card->state = CARD_STATE_DOWN;
1642	spin_lock_init(&card->lock);
1643	spin_lock_init(&card->thread_mask_lock);
1644	mutex_init(&card->conf_mutex);
1645	mutex_init(&card->discipline_mutex);
1646	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1647	INIT_LIST_HEAD(&card->cmd_waiter_list);
1648	init_waitqueue_head(&card->wait_q);
1649	qeth_set_initial_options(card);
1650	/* IP address takeover */
1651	INIT_LIST_HEAD(&card->ipato.entries);
1652	qeth_init_qdio_info(card);
1653	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1654	INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
1655	hash_init(card->rx_mode_addrs);
1656	hash_init(card->local_addrs4);
1657	hash_init(card->local_addrs6);
1658	spin_lock_init(&card->local_addrs4_lock);
1659	spin_lock_init(&card->local_addrs6_lock);
1660}
1661
1662static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1663{
1664	struct qeth_card *card = container_of(slr, struct qeth_card,
1665					qeth_service_level);
1666	if (card->info.mcl_level[0])
1667		seq_printf(m, "qeth: %s firmware level %s\n",
1668			CARD_BUS_ID(card), card->info.mcl_level);
1669}
1670
1671static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1672{
1673	struct qeth_card *card;
1674
1675	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1676	card = kzalloc(sizeof(*card), GFP_KERNEL);
1677	if (!card)
1678		goto out;
1679	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1680
1681	card->gdev = gdev;
1682	dev_set_drvdata(&gdev->dev, card);
1683	CARD_RDEV(card) = gdev->cdev[0];
1684	CARD_WDEV(card) = gdev->cdev[1];
1685	CARD_DDEV(card) = gdev->cdev[2];
1686
1687	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1688						 dev_name(&gdev->dev));
1689	if (!card->event_wq)
1690		goto out_wq;
1691
1692	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1693	if (!card->read_cmd)
1694		goto out_read_cmd;
1695
1696	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
1697					   qeth_debugfs_root);
1698	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
1699			    &qeth_debugfs_local_addr_fops);
1700
1701	card->qeth_service_level.seq_print = qeth_core_sl_print;
1702	register_service_level(&card->qeth_service_level);
1703	return card;
1704
1705out_read_cmd:
1706	destroy_workqueue(card->event_wq);
1707out_wq:
1708	dev_set_drvdata(&gdev->dev, NULL);
1709	kfree(card);
1710out:
1711	return NULL;
1712}
1713
1714static int qeth_clear_channel(struct qeth_card *card,
1715			      struct qeth_channel *channel)
1716{
1717	int rc;
1718
1719	QETH_CARD_TEXT(card, 3, "clearch");
1720	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1721	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
1722	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1723
1724	if (rc)
1725		return rc;
1726	rc = wait_event_interruptible_timeout(card->wait_q,
1727			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1728	if (rc == -ERESTARTSYS)
1729		return rc;
1730	if (channel->state != CH_STATE_STOPPED)
1731		return -ETIME;
1732	channel->state = CH_STATE_DOWN;
1733	return 0;
1734}
1735
1736static int qeth_halt_channel(struct qeth_card *card,
1737			     struct qeth_channel *channel)
1738{
1739	int rc;
1740
1741	QETH_CARD_TEXT(card, 3, "haltch");
1742	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1743	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
1744	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1745
1746	if (rc)
1747		return rc;
1748	rc = wait_event_interruptible_timeout(card->wait_q,
1749			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1750	if (rc == -ERESTARTSYS)
1751		return rc;
1752	if (channel->state != CH_STATE_HALTED)
1753		return -ETIME;
1754	return 0;
1755}
1756
1757int qeth_stop_channel(struct qeth_channel *channel)
1758{
1759	struct ccw_device *cdev = channel->ccwdev;
1760	int rc;
1761
1762	rc = ccw_device_set_offline(cdev);
1763
1764	spin_lock_irq(get_ccwdev_lock(cdev));
1765	if (channel->active_cmd) {
1766		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
1767			channel->active_cmd);
1768		channel->active_cmd = NULL;
1769	}
1770	cdev->handler = NULL;
1771	spin_unlock_irq(get_ccwdev_lock(cdev));
1772
1773	return rc;
1774}
1775EXPORT_SYMBOL_GPL(qeth_stop_channel);
1776
1777static int qeth_start_channel(struct qeth_channel *channel)
1778{
1779	struct ccw_device *cdev = channel->ccwdev;
1780	int rc;
1781
1782	channel->state = CH_STATE_DOWN;
1783	atomic_set(&channel->irq_pending, 0);
1784
1785	spin_lock_irq(get_ccwdev_lock(cdev));
1786	cdev->handler = qeth_irq;
1787	spin_unlock_irq(get_ccwdev_lock(cdev));
1788
1789	rc = ccw_device_set_online(cdev);
1790	if (rc)
1791		goto err;
1792
1793	return 0;
1794
1795err:
1796	spin_lock_irq(get_ccwdev_lock(cdev));
1797	cdev->handler = NULL;
1798	spin_unlock_irq(get_ccwdev_lock(cdev));
1799	return rc;
1800}
1801
1802static int qeth_halt_channels(struct qeth_card *card)
1803{
1804	int rc1 = 0, rc2 = 0, rc3 = 0;
1805
1806	QETH_CARD_TEXT(card, 3, "haltchs");
1807	rc1 = qeth_halt_channel(card, &card->read);
1808	rc2 = qeth_halt_channel(card, &card->write);
1809	rc3 = qeth_halt_channel(card, &card->data);
1810	if (rc1)
1811		return rc1;
1812	if (rc2)
1813		return rc2;
1814	return rc3;
1815}
1816
1817static int qeth_clear_channels(struct qeth_card *card)
1818{
1819	int rc1 = 0, rc2 = 0, rc3 = 0;
1820
1821	QETH_CARD_TEXT(card, 3, "clearchs");
1822	rc1 = qeth_clear_channel(card, &card->read);
1823	rc2 = qeth_clear_channel(card, &card->write);
1824	rc3 = qeth_clear_channel(card, &card->data);
1825	if (rc1)
1826		return rc1;
1827	if (rc2)
1828		return rc2;
1829	return rc3;
1830}
1831
1832static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1833{
1834	int rc = 0;
1835
1836	QETH_CARD_TEXT(card, 3, "clhacrd");
1837
1838	if (halt)
1839		rc = qeth_halt_channels(card);
1840	if (rc)
1841		return rc;
1842	return qeth_clear_channels(card);
1843}
1844
1845int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1846{
1847	int rc = 0;
1848
1849	QETH_CARD_TEXT(card, 3, "qdioclr");
1850	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1851		QETH_QDIO_CLEANING)) {
1852	case QETH_QDIO_ESTABLISHED:
1853		if (IS_IQD(card))
1854			rc = qdio_shutdown(CARD_DDEV(card),
1855				QDIO_FLAG_CLEANUP_USING_HALT);
1856		else
1857			rc = qdio_shutdown(CARD_DDEV(card),
1858				QDIO_FLAG_CLEANUP_USING_CLEAR);
1859		if (rc)
1860			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1861		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1862		break;
1863	case QETH_QDIO_CLEANING:
1864		return rc;
1865	default:
1866		break;
1867	}
1868	rc = qeth_clear_halt_card(card, use_halt);
1869	if (rc)
1870		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1871	return rc;
1872}
1873EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1874
1875static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1876{
1877	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1878	struct diag26c_vnic_resp *response = NULL;
1879	struct diag26c_vnic_req *request = NULL;
1880	struct ccw_dev_id id;
1881	char userid[80];
1882	int rc = 0;
1883
1884	QETH_CARD_TEXT(card, 2, "vmlayer");
1885
1886	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1887	if (rc)
1888		goto out;
1889
1890	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1891	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1892	if (!request || !response) {
1893		rc = -ENOMEM;
1894		goto out;
1895	}
1896
1897	ccw_device_get_id(CARD_RDEV(card), &id);
1898	request->resp_buf_len = sizeof(*response);
1899	request->resp_version = DIAG26C_VERSION6_VM65918;
1900	request->req_format = DIAG26C_VNIC_INFO;
1901	ASCEBC(userid, 8);
1902	memcpy(&request->sys_name, userid, 8);
1903	request->devno = id.devno;
1904
1905	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1906	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1907	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1908	if (rc)
1909		goto out;
1910	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1911
1912	if (request->resp_buf_len < sizeof(*response) ||
1913	    response->version != request->resp_version) {
1914		rc = -EIO;
1915		goto out;
1916	}
1917
1918	if (response->protocol == VNIC_INFO_PROT_L2)
1919		disc = QETH_DISCIPLINE_LAYER2;
1920	else if (response->protocol == VNIC_INFO_PROT_L3)
1921		disc = QETH_DISCIPLINE_LAYER3;
1922
1923out:
1924	kfree(response);
1925	kfree(request);
1926	if (rc)
1927		QETH_CARD_TEXT_(card, 2, "err%x", rc);
1928	return disc;
1929}
1930
1931/* Determine whether the device requires a specific layer discipline */
1932static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1933{
1934	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1935
1936	if (IS_OSM(card) || IS_OSN(card))
1937		disc = QETH_DISCIPLINE_LAYER2;
1938	else if (IS_VM_NIC(card))
1939		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1940				      qeth_vm_detect_layer(card);
1941
1942	switch (disc) {
1943	case QETH_DISCIPLINE_LAYER2:
1944		QETH_CARD_TEXT(card, 3, "force l2");
1945		break;
1946	case QETH_DISCIPLINE_LAYER3:
1947		QETH_CARD_TEXT(card, 3, "force l3");
1948		break;
1949	default:
1950		QETH_CARD_TEXT(card, 3, "force no");
1951	}
1952
1953	return disc;
1954}
1955
1956static void qeth_set_blkt_defaults(struct qeth_card *card)
1957{
1958	QETH_CARD_TEXT(card, 2, "cfgblkt");
1959
1960	if (card->info.use_v1_blkt) {
1961		card->info.blkt.time_total = 0;
1962		card->info.blkt.inter_packet = 0;
1963		card->info.blkt.inter_packet_jumbo = 0;
1964	} else {
1965		card->info.blkt.time_total = 250;
1966		card->info.blkt.inter_packet = 5;
1967		card->info.blkt.inter_packet_jumbo = 15;
1968	}
1969}
1970
1971static void qeth_idx_init(struct qeth_card *card)
1972{
1973	memset(&card->seqno, 0, sizeof(card->seqno));
1974
1975	card->token.issuer_rm_w = 0x00010103UL;
1976	card->token.cm_filter_w = 0x00010108UL;
1977	card->token.cm_connection_w = 0x0001010aUL;
1978	card->token.ulp_filter_w = 0x0001010bUL;
1979	card->token.ulp_connection_w = 0x0001010dUL;
1980
1981	switch (card->info.type) {
1982	case QETH_CARD_TYPE_IQD:
1983		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1984		break;
1985	case QETH_CARD_TYPE_OSD:
1986	case QETH_CARD_TYPE_OSN:
1987		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1988		break;
1989	default:
1990		break;
1991	}
1992}
1993
1994static void qeth_idx_finalize_cmd(struct qeth_card *card,
1995				  struct qeth_cmd_buffer *iob)
1996{
1997	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1998	       QETH_SEQ_NO_LENGTH);
1999	if (iob->channel == &card->write)
2000		card->seqno.trans_hdr++;
2001}
2002
2003static int qeth_peer_func_level(int level)
2004{
2005	if ((level & 0xff) == 8)
2006		return (level & 0xff) + 0x400;
2007	if (((level >> 8) & 3) == 1)
2008		return (level & 0xff) + 0x200;
2009	return level;
2010}
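
/*
 * Editor's note: this maps the locally announced IDX function level to
 * the value the peer is expected to report back (e.g. 0x0101 -> 0x0201);
 * the IDX activation callbacks below compare the peer's level against
 * this expectation.
 */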
2011
2012static void qeth_mpc_finalize_cmd(struct qeth_card *card,
2013				  struct qeth_cmd_buffer *iob)
2014{
2015	qeth_idx_finalize_cmd(card, iob);
2016
2017	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
2018	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
2019	card->seqno.pdu_hdr++;
2020	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
2021	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
2022
2023	iob->callback = qeth_release_buffer_cb;
2024}
2025
2026static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
2027				 struct qeth_cmd_buffer *reply)
2028{
2029	/* MPC cmds are issued strictly in sequence. */
2030	return !IS_IPA(reply->data);
2031}
2032
2033static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
2034						  const void *data,
2035						  unsigned int data_length)
2036{
2037	struct qeth_cmd_buffer *iob;
2038
2039	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
2040	if (!iob)
2041		return NULL;
2042
2043	memcpy(iob->data, data, data_length);
2044	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
2045		       iob->data);
2046	iob->finalize = qeth_mpc_finalize_cmd;
2047	iob->match = qeth_mpc_match_reply;
2048	return iob;
2049}
2050
2051/**
2052 * qeth_send_control_data() -	send control command to the card
2053 * @card:			qeth_card structure pointer
2054 * @iob:			qeth_cmd_buffer pointer
2055 * @reply_cb:			callback function pointer
2056 * @cb_card:			pointer to the qeth_card structure
2057 * @cb_reply:			pointer to the qeth_reply structure
2058 * @cb_cmd:			pointer to the original iob for non-IPA
2059 *				commands, or to the qeth_ipa_cmd structure
2060 *				for the IPA commands.
2061 * @reply_param:		private pointer passed to the callback
2062 *
2063 * Callback function gets called one or more times, with cb_cmd
2064 * pointing to the response returned by the hardware. Callback
2065 * function must return
2066 *   > 0 if more reply blocks are expected,
2067 *     0 if the last or only reply block is received, and
2068 *   < 0 on error.
2069 * Callback function can get the value of the reply_param pointer from the
2070 * field 'param' of the structure qeth_reply.
2071 */
2073static int qeth_send_control_data(struct qeth_card *card,
2074				  struct qeth_cmd_buffer *iob,
2075				  int (*reply_cb)(struct qeth_card *cb_card,
2076						  struct qeth_reply *cb_reply,
2077						  unsigned long cb_cmd),
2078				  void *reply_param)
2079{
2080	struct qeth_channel *channel = iob->channel;
2081	struct qeth_reply *reply = &iob->reply;
2082	long timeout = iob->timeout;
2083	int rc;
2084
2085	QETH_CARD_TEXT(card, 2, "sendctl");
2086
2087	reply->callback = reply_cb;
2088	reply->param = reply_param;
2089
2090	timeout = wait_event_interruptible_timeout(card->wait_q,
2091						   qeth_trylock_channel(channel),
2092						   timeout);
2093	if (timeout <= 0) {
2094		qeth_put_cmd(iob);
2095		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2096	}
2097
2098	if (iob->finalize)
2099		iob->finalize(card, iob);
2100	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
2101
2102	qeth_enqueue_cmd(card, iob);
2103
2104	/* This pairs with iob->callback, and keeps the iob alive after IO: */
2105	qeth_get_cmd(iob);
2106
2107	QETH_CARD_TEXT(card, 6, "noirqpnd");
2108	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2109	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2110				      (addr_t) iob, 0, 0, timeout);
2111	if (!rc)
2112		channel->active_cmd = iob;
2113	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2114	if (rc) {
2115		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2116				 CARD_DEVID(card), rc);
2117		QETH_CARD_TEXT_(card, 2, " err%d", rc);
2118		qeth_dequeue_cmd(card, iob);
2119		qeth_put_cmd(iob);
2120		qeth_unlock_channel(card, channel);
2121		goto out;
2122	}
2123
2124	timeout = wait_for_completion_interruptible_timeout(&iob->done,
2125							    timeout);
2126	if (timeout <= 0)
2127		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2128
2129	qeth_dequeue_cmd(card, iob);
2130
2131	if (reply_cb) {
2132		/* Wait until the callback for a late reply has completed: */
2133		spin_lock_irq(&iob->lock);
2134		if (rc)
2135			/* Zap any callback that's still pending: */
2136			iob->rc = rc;
2137		spin_unlock_irq(&iob->lock);
2138	}
2139
2140	if (!rc)
2141		rc = iob->rc;
2142
2143out:
2144	qeth_put_cmd(iob);
2145	return rc;
2146}
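
/*
 * Editor's illustration (not part of the driver): a minimal reply_cb
 * following the contract documented above, modelled on the MPC
 * callbacks further below. Kept disabled; all names are hypothetical.
 */
#if 0
static int example_mpc_cb(struct qeth_card *cb_card,
			  struct qeth_reply *cb_reply,
			  unsigned long cb_cmd)
{
	struct qeth_cmd_buffer *iob = (struct qeth_cmd_buffer *)cb_cmd;

	/* Pick whatever the caller needs out of iob->data (here: a
	 * token) and stash it via the private reply_param pointer:
	 */
	memcpy(cb_reply->param, QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	return 0;	/* 0: last (or only) reply block was received */
}
#endif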
2147
2148struct qeth_node_desc {
2149	struct node_descriptor nd1;
2150	struct node_descriptor nd2;
2151	struct node_descriptor nd3;
2152};
2153
2154static void qeth_read_conf_data_cb(struct qeth_card *card,
2155				   struct qeth_cmd_buffer *iob,
2156				   unsigned int data_length)
2157{
2158	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2159	int rc = 0;
2160	u8 *tag;
2161
2162	QETH_CARD_TEXT(card, 2, "cfgunit");
2163
2164	if (data_length < sizeof(*nd)) {
2165		rc = -EINVAL;
2166		goto out;
2167	}
2168
2169	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2170			       nd->nd1.plant[1] == _ascebc['M'];
2171	tag = (u8 *)&nd->nd1.tag;
2172	card->info.chpid = tag[0];
2173	card->info.unit_addr2 = tag[1];
2174
2175	tag = (u8 *)&nd->nd2.tag;
2176	card->info.cula = tag[1];
2177
2178	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2179				 nd->nd3.model[1] == 0xF0 &&
2180				 nd->nd3.model[2] >= 0xF1 &&
2181				 nd->nd3.model[2] <= 0xF4;
2182
2183out:
2184	qeth_notify_cmd(iob, rc);
2185	qeth_put_cmd(iob);
2186}
2187
2188static int qeth_read_conf_data(struct qeth_card *card)
2189{
2190	struct qeth_channel *channel = &card->data;
2191	struct qeth_cmd_buffer *iob;
2192	struct ciw *ciw;
2193
2194	/* scan for RCD command in extended SenseID data */
2195	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2196	if (!ciw || ciw->cmd == 0)
2197		return -EOPNOTSUPP;
2198	if (ciw->count < sizeof(struct qeth_node_desc))
2199		return -EINVAL;
2200
2201	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2202	if (!iob)
2203		return -ENOMEM;
2204
2205	iob->callback = qeth_read_conf_data_cb;
2206	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2207		       iob->data);
2208
2209	return qeth_send_control_data(card, iob, NULL, NULL);
2210}
2211
2212static int qeth_idx_check_activate_response(struct qeth_card *card,
2213					    struct qeth_channel *channel,
2214					    struct qeth_cmd_buffer *iob)
2215{
2216	int rc;
2217
2218	rc = qeth_check_idx_response(card, iob->data);
2219	if (rc)
2220		return rc;
2221
2222	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2223		return 0;
2224
2225	/* negative reply: */
2226	QETH_CARD_TEXT_(card, 2, "idxneg%c",
2227			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2228
2229	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2230	case QETH_IDX_ACT_ERR_EXCL:
2231		dev_err(&channel->ccwdev->dev,
2232			"The adapter is used exclusively by another host\n");
2233		return -EBUSY;
2234	case QETH_IDX_ACT_ERR_AUTH:
2235	case QETH_IDX_ACT_ERR_AUTH_USER:
2236		dev_err(&channel->ccwdev->dev,
2237			"Setting the device online failed because of insufficient authorization\n");
2238		return -EPERM;
2239	default:
2240		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2241				 CCW_DEVID(channel->ccwdev));
2242		return -EIO;
2243	}
2244}
2245
2246static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2247					      struct qeth_cmd_buffer *iob,
2248					      unsigned int data_length)
2249{
2250	struct qeth_channel *channel = iob->channel;
2251	u16 peer_level;
2252	int rc;
2253
2254	QETH_CARD_TEXT(card, 2, "idxrdcb");
2255
2256	rc = qeth_idx_check_activate_response(card, channel, iob);
2257	if (rc)
2258		goto out;
2259
2260	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2261	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2262		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2263				 CCW_DEVID(channel->ccwdev),
2264				 card->info.func_level, peer_level);
2265		rc = -EINVAL;
2266		goto out;
2267	}
2268
2269	memcpy(&card->token.issuer_rm_r,
2270	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2271	       QETH_MPC_TOKEN_LENGTH);
2272	memcpy(&card->info.mcl_level[0],
2273	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2274
2275out:
2276	qeth_notify_cmd(iob, rc);
2277	qeth_put_cmd(iob);
2278}
2279
2280static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2281					       struct qeth_cmd_buffer *iob,
2282					       unsigned int data_length)
2283{
2284	struct qeth_channel *channel = iob->channel;
2285	u16 peer_level;
2286	int rc;
2287
2288	QETH_CARD_TEXT(card, 2, "idxwrcb");
2289
2290	rc = qeth_idx_check_activate_response(card, channel, iob);
2291	if (rc)
2292		goto out;
2293
2294	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2295	if ((peer_level & ~0x0100) !=
2296	    qeth_peer_func_level(card->info.func_level)) {
2297		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2298				 CCW_DEVID(channel->ccwdev),
2299				 card->info.func_level, peer_level);
2300		rc = -EINVAL;
2301	}
2302
2303out:
2304	qeth_notify_cmd(iob, rc);
2305	qeth_put_cmd(iob);
2306}
2307
2308static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2309					struct qeth_cmd_buffer *iob)
2310{
2311	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2312	u8 port = ((u8)card->dev->dev_port) | 0x80;
2313	struct ccw1 *ccw = __ccw_from_cmd(iob);
2314	struct ccw_dev_id dev_id;
2315
2316	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2317		       iob->data);
2318	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2319	ccw_device_get_id(CARD_DDEV(card), &dev_id);
2320	iob->finalize = qeth_idx_finalize_cmd;
2321
2322	port |= QETH_IDX_ACT_INVAL_FRAME;
2323	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2324	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2325	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2326	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2327	       &card->info.func_level, 2);
2328	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
2329	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2330}
2331
2332static int qeth_idx_activate_read_channel(struct qeth_card *card)
2333{
2334	struct qeth_channel *channel = &card->read;
2335	struct qeth_cmd_buffer *iob;
2336	int rc;
2337
2338	QETH_CARD_TEXT(card, 2, "idxread");
2339
2340	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2341	if (!iob)
2342		return -ENOMEM;
2343
2344	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2345	qeth_idx_setup_activate_cmd(card, iob);
2346	iob->callback = qeth_idx_activate_read_channel_cb;
2347
2348	rc = qeth_send_control_data(card, iob, NULL, NULL);
2349	if (rc)
2350		return rc;
2351
2352	channel->state = CH_STATE_UP;
2353	return 0;
2354}
2355
2356static int qeth_idx_activate_write_channel(struct qeth_card *card)
2357{
2358	struct qeth_channel *channel = &card->write;
2359	struct qeth_cmd_buffer *iob;
2360	int rc;
2361
2362	QETH_CARD_TEXT(card, 2, "idxwrite");
2363
2364	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2365	if (!iob)
2366		return -ENOMEM;
2367
2368	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2369	qeth_idx_setup_activate_cmd(card, iob);
2370	iob->callback = qeth_idx_activate_write_channel_cb;
2371
2372	rc = qeth_send_control_data(card, iob, NULL, NULL);
2373	if (rc)
2374		return rc;
2375
2376	channel->state = CH_STATE_UP;
2377	return 0;
2378}
2379
2380static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2381		unsigned long data)
2382{
2383	struct qeth_cmd_buffer *iob;
2384
2385	QETH_CARD_TEXT(card, 2, "cmenblcb");
2386
2387	iob = (struct qeth_cmd_buffer *) data;
2388	memcpy(&card->token.cm_filter_r,
2389	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2390	       QETH_MPC_TOKEN_LENGTH);
2391	return 0;
2392}
2393
2394static int qeth_cm_enable(struct qeth_card *card)
2395{
2396	struct qeth_cmd_buffer *iob;
2397
2398	QETH_CARD_TEXT(card, 2, "cmenable");
2399
2400	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2401	if (!iob)
2402		return -ENOMEM;
2403
2404	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2405	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2406	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2407	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2408
2409	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2410}
2411
2412static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2413		unsigned long data)
2414{
2415	struct qeth_cmd_buffer *iob;
2416
2417	QETH_CARD_TEXT(card, 2, "cmsetpcb");
2418
2419	iob = (struct qeth_cmd_buffer *) data;
2420	memcpy(&card->token.cm_connection_r,
2421	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2422	       QETH_MPC_TOKEN_LENGTH);
2423	return 0;
2424}
2425
2426static int qeth_cm_setup(struct qeth_card *card)
2427{
2428	struct qeth_cmd_buffer *iob;
2429
2430	QETH_CARD_TEXT(card, 2, "cmsetup");
2431
2432	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2433	if (!iob)
2434		return -ENOMEM;
2435
2436	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2437	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2438	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2439	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2440	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2441	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2442	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2443}
2444
2445static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2446{
2447	if (link_type == QETH_LINK_TYPE_LANE_TR ||
2448	    link_type == QETH_LINK_TYPE_HSTR) {
2449		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2450		return false;
2451	}
2452
2453	return true;
2454}
2455
2456static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2457{
2458	struct net_device *dev = card->dev;
2459	unsigned int new_mtu;
2460
2461	if (!max_mtu) {
2462		/* IQD needs accurate max MTU to set up its RX buffers: */
2463		if (IS_IQD(card))
2464			return -EINVAL;
2465		/* tolerate quirky HW: */
2466		max_mtu = ETH_MAX_MTU;
2467	}
2468
2469	rtnl_lock();
2470	if (IS_IQD(card)) {
2471		/* move any device with default MTU to new max MTU: */
2472		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2473
2474		/* adjust RX buffer size to new max MTU: */
2475		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2476		if (dev->max_mtu && dev->max_mtu != max_mtu)
2477			qeth_free_qdio_queues(card);
2478	} else {
2479		if (dev->mtu)
2480			new_mtu = dev->mtu;
2481		/* default MTUs for first setup: */
2482		else if (IS_LAYER2(card))
2483			new_mtu = ETH_DATA_LEN;
2484		else
2485			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2486	}
2487
2488	dev->max_mtu = max_mtu;
2489	dev->mtu = min(new_mtu, max_mtu);
2490	rtnl_unlock();
2491	return 0;
2492}
2493
2494static int qeth_get_mtu_outof_framesize(int framesize)
2495{
2496	switch (framesize) {
2497	case 0x4000:
2498		return 8192;
2499	case 0x6000:
2500		return 16384;
2501	case 0xa000:
2502		return 32768;
2503	case 0xffff:
2504		return 57344;
2505	default:
2506		return 0;
2507	}
2508}
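
/*
 * Editor's illustration: an IQD ULP_ENABLE reply advertising framesize
 * 0x4000 thus yields an MTU of 8192; an unknown encoding maps to 0,
 * which qeth_update_max_mtu() rejects for IQD devices.
 */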
2509
2510static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2511		unsigned long data)
2512{
2513	__u16 mtu, framesize;
2514	__u16 len;
2515	struct qeth_cmd_buffer *iob;
2516	u8 link_type = 0;
2517
2518	QETH_CARD_TEXT(card, 2, "ulpenacb");
2519
2520	iob = (struct qeth_cmd_buffer *) data;
2521	memcpy(&card->token.ulp_filter_r,
2522	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2523	       QETH_MPC_TOKEN_LENGTH);
2524	if (IS_IQD(card)) {
2525		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2526		mtu = qeth_get_mtu_outof_framesize(framesize);
2527	} else {
2528		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2529	}
2530	*(u16 *)reply->param = mtu;
2531
2532	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2533	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2534		memcpy(&link_type,
2535		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2536		if (!qeth_is_supported_link_type(card, link_type))
2537			return -EPROTONOSUPPORT;
2538	}
2539
2540	card->info.link_type = link_type;
2541	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2542	return 0;
2543}
2544
2545static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2546{
2547	if (IS_OSN(card))
2548		return QETH_PROT_OSN2;
2549	return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2550}
2551
2552static int qeth_ulp_enable(struct qeth_card *card)
2553{
2554	u8 prot_type = qeth_mpc_select_prot_type(card);
2555	struct qeth_cmd_buffer *iob;
2556	u16 max_mtu;
2557	int rc;
2558
2559	QETH_CARD_TEXT(card, 2, "ulpenabl");
2560
2561	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2562	if (!iob)
2563		return -ENOMEM;
2564
2565	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2566	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2567	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2568	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2569	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2570	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2571	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2572	if (rc)
2573		return rc;
2574	return qeth_update_max_mtu(card, max_mtu);
2575}
2576
2577static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2578		unsigned long data)
2579{
2580	struct qeth_cmd_buffer *iob;
2581
2582	QETH_CARD_TEXT(card, 2, "ulpstpcb");
2583
2584	iob = (struct qeth_cmd_buffer *) data;
2585	memcpy(&card->token.ulp_connection_r,
2586	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2587	       QETH_MPC_TOKEN_LENGTH);
2588	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2589		     3)) {
2590		QETH_CARD_TEXT(card, 2, "olmlimit");
2591		dev_err(&card->gdev->dev,
2592			"A connection could not be established because of an OLM limit\n");
2593		return -EMLINK;
2594	}
2595	return 0;
2596}
2597
2598static int qeth_ulp_setup(struct qeth_card *card)
2599{
2600	__u16 temp;
2601	struct qeth_cmd_buffer *iob;
2602	struct ccw_dev_id dev_id;
2603
2604	QETH_CARD_TEXT(card, 2, "ulpsetup");
2605
2606	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2607	if (!iob)
2608		return -ENOMEM;
2609
2610	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2611	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2612	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2613	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2614	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2615	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2616
2617	ccw_device_get_id(CARD_DDEV(card), &dev_id);
2618	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
2619	temp = (card->info.cula << 8) + card->info.unit_addr2;
2620	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2621	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2622}
2623
2624static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2625{
2626	struct qeth_qdio_out_buffer *newbuf;
2627
2628	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
2629	if (!newbuf)
2630		return -ENOMEM;
2631
2632	newbuf->buffer = q->qdio_bufs[bidx];
2633	skb_queue_head_init(&newbuf->skb_list);
2634	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2635	newbuf->q = q;
2636	newbuf->next_pending = q->bufs[bidx];
2637	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2638	q->bufs[bidx] = newbuf;
2639	return 0;
2640}
2641
2642static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2643{
2644	if (!q)
2645		return;
2646
2647	qeth_drain_output_queue(q, true);
2648	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2649	kfree(q);
2650}
2651
2652static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2653{
2654	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2655
2656	if (!q)
2657		return NULL;
2658
2659	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2660		kfree(q);
2661		return NULL;
2662	}
2663	return q;
2664}
2665
2666static void qeth_tx_completion_timer(struct timer_list *timer)
2667{
2668	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2669
2670	napi_schedule(&queue->napi);
2671	QETH_TXQ_STAT_INC(queue, completion_timer);
2672}
2673
2674static int qeth_alloc_qdio_queues(struct qeth_card *card)
2675{
2676	int i, j;
2677
2678	QETH_CARD_TEXT(card, 2, "allcqdbf");
2679
2680	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2681		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2682		return 0;
2683
2684	QETH_CARD_TEXT(card, 2, "inq");
2685	card->qdio.in_q = qeth_alloc_qdio_queue();
2686	if (!card->qdio.in_q)
2687		goto out_nomem;
2688
2689	/* inbound buffer pool */
2690	if (qeth_alloc_buffer_pool(card))
2691		goto out_freeinq;
2692
2693	/* outbound */
2694	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2695		struct qeth_qdio_out_q *queue;
2696
2697		queue = qeth_alloc_output_queue();
2698		if (!queue)
2699			goto out_freeoutq;
2700		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2701		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2702		card->qdio.out_qs[i] = queue;
2703		queue->card = card;
2704		queue->queue_no = i;
2705		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2706		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2707		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2708
2709		/* give outbound qeth_qdio_buffers their qdio_buffers */
2710		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2711			WARN_ON(queue->bufs[j]);
2712			if (qeth_init_qdio_out_buf(queue, j))
2713				goto out_freeoutqbufs;
2714		}
2715	}
2716
2717	/* completion */
2718	if (qeth_alloc_cq(card))
2719		goto out_freeoutq;
2720
2721	return 0;
2722
2723out_freeoutqbufs:
2724	while (j > 0) {
2725		--j;
2726		kmem_cache_free(qeth_qdio_outbuf_cache,
2727				card->qdio.out_qs[i]->bufs[j]);
2728		card->qdio.out_qs[i]->bufs[j] = NULL;
2729	}
2730out_freeoutq:
2731	while (i > 0) {
2732		qeth_free_output_queue(card->qdio.out_qs[--i]);
2733		card->qdio.out_qs[i] = NULL;
2734	}
2735	qeth_free_buffer_pool(card);
2736out_freeinq:
2737	qeth_free_qdio_queue(card->qdio.in_q);
2738	card->qdio.in_q = NULL;
2739out_nomem:
2740	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2741	return -ENOMEM;
2742}
2743
2744static void qeth_free_qdio_queues(struct qeth_card *card)
2745{
2746	int i, j;
2747
2748	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2749		QETH_QDIO_UNINITIALIZED)
2750		return;
2751
2752	qeth_free_cq(card);
2753	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2754		if (card->qdio.in_q->bufs[j].rx_skb)
2755			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2756	}
2757	qeth_free_qdio_queue(card->qdio.in_q);
2758	card->qdio.in_q = NULL;
2759	/* inbound buffer pool */
2760	qeth_free_buffer_pool(card);
2761	/* free outbound qdio_qs */
2762	for (i = 0; i < card->qdio.no_out_queues; i++) {
2763		qeth_free_output_queue(card->qdio.out_qs[i]);
2764		card->qdio.out_qs[i] = NULL;
2765	}
2766}
2767
2768static void qeth_create_qib_param_field(struct qeth_card *card,
2769		char *param_field)
2770{
2771
2772	param_field[0] = _ascebc['P'];
2773	param_field[1] = _ascebc['C'];
2774	param_field[2] = _ascebc['I'];
2775	param_field[3] = _ascebc['T'];
2776	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
2777	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
2778	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
2779}
2780
2781static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
2782		char *param_field)
2783{
2784	param_field[16] = _ascebc['B'];
2785	param_field[17] = _ascebc['L'];
2786	param_field[18] = _ascebc['K'];
2787	param_field[19] = _ascebc['T'];
2788	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
2789	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
2790	*((unsigned int *) (&param_field[28])) =
2791		card->info.blkt.inter_packet_jumbo;
2792}
2793
2794static int qeth_qdio_activate(struct qeth_card *card)
2795{
2796	QETH_CARD_TEXT(card, 3, "qdioact");
2797	return qdio_activate(CARD_DDEV(card));
2798}
2799
2800static int qeth_dm_act(struct qeth_card *card)
2801{
2802	struct qeth_cmd_buffer *iob;
2803
2804	QETH_CARD_TEXT(card, 2, "dmact");
2805
2806	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2807	if (!iob)
2808		return -ENOMEM;
2809
2810	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2811	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2812	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2813	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2814	return qeth_send_control_data(card, iob, NULL, NULL);
2815}
2816
2817static int qeth_mpc_initialize(struct qeth_card *card)
2818{
2819	int rc;
2820
2821	QETH_CARD_TEXT(card, 2, "mpcinit");
2822
2823	rc = qeth_issue_next_read(card);
2824	if (rc) {
2825		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2826		return rc;
2827	}
2828	rc = qeth_cm_enable(card);
2829	if (rc) {
2830		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2831		return rc;
2832	}
2833	rc = qeth_cm_setup(card);
2834	if (rc) {
2835		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2836		return rc;
2837	}
2838	rc = qeth_ulp_enable(card);
2839	if (rc) {
2840		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2841		return rc;
2842	}
2843	rc = qeth_ulp_setup(card);
2844	if (rc) {
2845		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2846		return rc;
2847	}
2848	rc = qeth_alloc_qdio_queues(card);
2849	if (rc) {
2850		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2851		return rc;
2852	}
2853	rc = qeth_qdio_establish(card);
2854	if (rc) {
2855		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2856		qeth_free_qdio_queues(card);
2857		return rc;
2858	}
2859	rc = qeth_qdio_activate(card);
2860	if (rc) {
2861		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2862		return rc;
2863	}
2864	rc = qeth_dm_act(card);
2865	if (rc) {
2866		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2867		return rc;
2868	}
2869
2870	return 0;
2871}
2872
2873void qeth_print_status_message(struct qeth_card *card)
2874{
2875	switch (card->info.type) {
2876	case QETH_CARD_TYPE_OSD:
2877	case QETH_CARD_TYPE_OSM:
2878	case QETH_CARD_TYPE_OSX:
2879		/* VM will use a non-zero first character to indicate
2880		 * a HiperSockets-like reporting of the level;
2881		 * OSA sets the first character to zero.
2882		 */
2883		if (!card->info.mcl_level[0]) {
2884			sprintf(card->info.mcl_level, "%02x%02x",
2885				card->info.mcl_level[2],
2886				card->info.mcl_level[3]);
2887			break;
2888		}
2889		fallthrough;
2890	case QETH_CARD_TYPE_IQD:
2891		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2892			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2893				card->info.mcl_level[0]];
2894			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2895				card->info.mcl_level[1]];
2896			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2897				card->info.mcl_level[2]];
2898			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2899				card->info.mcl_level[3]];
2900			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2901		}
2902		break;
2903	default:
2904		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2905	}
2906	dev_info(&card->gdev->dev,
2907		 "Device is a%s card%s%s%s\nwith link type %s.\n",
2908		 qeth_get_cardname(card),
2909		 (card->info.mcl_level[0]) ? " (level: " : "",
2910		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2911		 (card->info.mcl_level[0]) ? ")" : "",
2912		 qeth_get_cardname_short(card));
2913}
2914EXPORT_SYMBOL_GPL(qeth_print_status_message);
2915
2916static void qeth_initialize_working_pool_list(struct qeth_card *card)
2917{
2918	struct qeth_buffer_pool_entry *entry;
2919
2920	QETH_CARD_TEXT(card, 5, "inwrklst");
2921
2922	list_for_each_entry(entry,
2923			    &card->qdio.init_pool.entry_list, init_list) {
2924		qeth_put_buffer_pool_entry(card, entry);
2925	}
2926}
2927
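/*
 * Editor's note: a pool entry counts as free only if none of its pages
 * is still referenced elsewhere (page_count() > 1 means an skb handed
 * up the stack still holds the page). If no entry is fully free, the
 * first entry is recycled by swapping in freshly allocated pages.
 */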
2928static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2929					struct qeth_card *card)
2930{
2931	struct qeth_buffer_pool_entry *entry;
2932	int i, free;
2933
2934	if (list_empty(&card->qdio.in_buf_pool.entry_list))
2935		return NULL;
2936
2937	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2938		free = 1;
2939		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2940			if (page_count(entry->elements[i]) > 1) {
2941				free = 0;
2942				break;
2943			}
2944		}
2945		if (free) {
2946			list_del_init(&entry->list);
2947			return entry;
2948		}
2949	}
2950
2951	/* no free buffer in pool so take first one and swap pages */
2952	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2953				 struct qeth_buffer_pool_entry, list);
2954	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2955		if (page_count(entry->elements[i]) > 1) {
2956			struct page *page = dev_alloc_page();
2957
2958			if (!page)
2959				return NULL;
2960
2961			__free_page(entry->elements[i]);
2962			entry->elements[i] = page;
2963			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2964		}
2965	}
2966	list_del_init(&entry->list);
2967	return entry;
2968}
2969
2970static int qeth_init_input_buffer(struct qeth_card *card,
2971		struct qeth_qdio_buffer *buf)
2972{
2973	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2974	int i;
2975
2976	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2977		buf->rx_skb = netdev_alloc_skb(card->dev,
2978					       ETH_HLEN +
2979					       sizeof(struct ipv6hdr));
2980		if (!buf->rx_skb)
2981			return -ENOMEM;
2982	}
2983
2984	if (!pool_entry) {
2985		pool_entry = qeth_find_free_buffer_pool_entry(card);
2986		if (!pool_entry)
2987			return -ENOBUFS;
2988
2989		buf->pool_entry = pool_entry;
2990	}
2991
2992	/*
2993	 * since the buffer is accessed only from the input_tasklet
2994	 * there shouldn't be a need to synchronize; also, since we use
2995	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2996	 * buffers
2997	 */
2998	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2999		buf->buffer->element[i].length = PAGE_SIZE;
3000		buf->buffer->element[i].addr =
3001			page_to_phys(pool_entry->elements[i]);
3002		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
3003			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
3004		else
3005			buf->buffer->element[i].eflags = 0;
3006		buf->buffer->element[i].sflags = 0;
3007	}
3008	return 0;
3009}
3010
3011static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
3012					    struct qeth_qdio_out_q *queue)
3013{
3014	if (!IS_IQD(card) ||
3015	    qeth_iqd_is_mcast_queue(card, queue) ||
3016	    card->options.cq == QETH_CQ_ENABLED ||
3017	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
3018		return 1;
3019
3020	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
3021}
3022
3023static int qeth_init_qdio_queues(struct qeth_card *card)
3024{
3025	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
3026	unsigned int i;
3027	int rc;
3028
3029	QETH_CARD_TEXT(card, 2, "initqdqs");
3030
3031	/* inbound queue */
3032	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3033	memset(&card->rx, 0, sizeof(struct qeth_rx));
3034
3035	qeth_initialize_working_pool_list(card);
3036	/* give only as many buffers to hardware as we have buffer pool entries */
3037	for (i = 0; i < rx_bufs; i++) {
3038		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3039		if (rc)
3040			return rc;
3041	}
3042
3043	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
3044	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs);
3045	if (rc) {
3046		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
3047		return rc;
3048	}
3049
3050	/* completion */
3051	rc = qeth_cq_init(card);
3052	if (rc)
3053		return rc;
3055
3056	/* outbound queue */
3057	for (i = 0; i < card->qdio.no_out_queues; ++i) {
3058		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
3059
3060		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3061		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
3062		queue->next_buf_to_fill = 0;
3063		queue->do_pack = 0;
3064		queue->prev_hdr = NULL;
3065		queue->coalesced_frames = 0;
3066		queue->bulk_start = 0;
3067		queue->bulk_count = 0;
3068		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
3069		atomic_set(&queue->used_buffers, 0);
3070		atomic_set(&queue->set_pci_flags_count, 0);
3071		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3072		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
3073	}
3074	return 0;
3075}
3076
3077static void qeth_ipa_finalize_cmd(struct qeth_card *card,
3078				  struct qeth_cmd_buffer *iob)
3079{
3080	qeth_mpc_finalize_cmd(card, iob);
3081
3082	/* override with IPA-specific values: */
3083	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3084}
3085
3086void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3087			  u16 cmd_length,
3088			  bool (*match)(struct qeth_cmd_buffer *iob,
3089					struct qeth_cmd_buffer *reply))
3090{
3091	u8 prot_type = qeth_mpc_select_prot_type(card);
3092	u16 total_length = iob->length;
3093
3094	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3095		       iob->data);
3096	iob->finalize = qeth_ipa_finalize_cmd;
3097	iob->match = match;
3098
3099	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3100	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3101	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3102	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3103	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3104	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3105	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3106	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3107}
3108EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
3109
3110static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3111				 struct qeth_cmd_buffer *reply)
3112{
3113	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3114
3115	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3116}
3117
3118struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3119					   enum qeth_ipa_cmds cmd_code,
3120					   enum qeth_prot_versions prot,
3121					   unsigned int data_length)
3122{
3123	struct qeth_cmd_buffer *iob;
3124	struct qeth_ipacmd_hdr *hdr;
3125
3126	data_length += offsetof(struct qeth_ipa_cmd, data);
3127	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3128			     QETH_IPA_TIMEOUT);
3129	if (!iob)
3130		return NULL;
3131
3132	qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply);
3133
3134	hdr = &__ipa_cmd(iob)->hdr;
3135	hdr->command = cmd_code;
3136	hdr->initiator = IPA_CMD_INITIATOR_HOST;
3137	/* hdr->seqno is set by qeth_send_control_data() */
3138	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3139	hdr->rel_adapter_no = (u8) card->dev->dev_port;
3140	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3141	hdr->param_count = 1;
3142	hdr->prot_version = prot;
3143	return iob;
3144}
3145EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3146
3147static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3148				struct qeth_reply *reply, unsigned long data)
3149{
3150	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3151
3152	return (cmd->hdr.return_code) ? -EIO : 0;
3153}
3154
3155/**
3156 * qeth_send_ipa_cmd() - send an IPA command
3157 *
3158 * See qeth_send_control_data() for explanation of the arguments.
3159 */
3161int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3162		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3163			unsigned long),
3164		void *reply_param)
3165{
3166	int rc;
3167
3168	QETH_CARD_TEXT(card, 4, "sendipa");
3169
3170	if (card->read_or_write_problem) {
3171		qeth_put_cmd(iob);
3172		return -EIO;
3173	}
3174
3175	if (reply_cb == NULL)
3176		reply_cb = qeth_send_ipa_cmd_cb;
3177	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3178	if (rc == -ETIME) {
3179		qeth_clear_ipacmd_list(card);
3180		qeth_schedule_recovery(card);
3181	}
3182	return rc;
3183}
3184EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3185
3186static int qeth_send_startlan_cb(struct qeth_card *card,
3187				 struct qeth_reply *reply, unsigned long data)
3188{
3189	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3190
3191	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3192		return -ENETDOWN;
3193
3194	return (cmd->hdr.return_code) ? -EIO : 0;
3195}
3196
3197static int qeth_send_startlan(struct qeth_card *card)
3198{
3199	struct qeth_cmd_buffer *iob;
3200
3201	QETH_CARD_TEXT(card, 2, "strtlan");
3202
3203	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3204	if (!iob)
3205		return -ENOMEM;
3206	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3207}
3208
3209static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3210{
3211	if (!cmd->hdr.return_code)
3212		cmd->hdr.return_code =
3213			cmd->data.setadapterparms.hdr.return_code;
3214	return cmd->hdr.return_code;
3215}
3216
3217static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3218		struct qeth_reply *reply, unsigned long data)
3219{
3220	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3221	struct qeth_query_cmds_supp *query_cmd;
3222
3223	QETH_CARD_TEXT(card, 3, "quyadpcb");
3224	if (qeth_setadpparms_inspect_rc(cmd))
3225		return -EIO;
3226
3227	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3228	if (query_cmd->lan_type & 0x7f) {
3229		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3230			return -EPROTONOSUPPORT;
3231
3232		card->info.link_type = query_cmd->lan_type;
3233		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3234	}
3235
3236	card->options.adp.supported = query_cmd->supported_cmds;
3237	return 0;
3238}
3239
3240static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3241						    enum qeth_ipa_setadp_cmd adp_cmd,
3242						    unsigned int data_length)
3243{
3244	struct qeth_ipacmd_setadpparms_hdr *hdr;
3245	struct qeth_cmd_buffer *iob;
3246
3247	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3248				 data_length +
3249				 offsetof(struct qeth_ipacmd_setadpparms,
3250					  data));
3251	if (!iob)
3252		return NULL;
3253
3254	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3255	hdr->cmdlength = sizeof(*hdr) + data_length;
3256	hdr->command_code = adp_cmd;
3257	hdr->used_total = 1;
3258	hdr->seq_no = 1;
3259	return iob;
3260}
3261
3262static int qeth_query_setadapterparms(struct qeth_card *card)
3263{
3264	int rc;
3265	struct qeth_cmd_buffer *iob;
3266
3267	QETH_CARD_TEXT(card, 3, "queryadp");
3268	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3269				   SETADP_DATA_SIZEOF(query_cmds_supp));
3270	if (!iob)
3271		return -ENOMEM;
3272	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3273	return rc;
3274}
3275
3276static int qeth_query_ipassists_cb(struct qeth_card *card,
3277		struct qeth_reply *reply, unsigned long data)
3278{
3279	struct qeth_ipa_cmd *cmd;
3280
3281	QETH_CARD_TEXT(card, 2, "qipasscb");
3282
3283	cmd = (struct qeth_ipa_cmd *) data;
3284
3285	switch (cmd->hdr.return_code) {
3286	case IPA_RC_SUCCESS:
3287		break;
3288	case IPA_RC_NOTSUPP:
3289	case IPA_RC_L2_UNSUPPORTED_CMD:
3290		QETH_CARD_TEXT(card, 2, "ipaunsup");
3291		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3292		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3293		return -EOPNOTSUPP;
3294	default:
3295		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3296				 CARD_DEVID(card), cmd->hdr.return_code);
3297		return -EIO;
3298	}
3299
3300	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3301		card->options.ipa4 = cmd->hdr.assists;
3302	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3303		card->options.ipa6 = cmd->hdr.assists;
3304	else
3305		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3306				 CARD_DEVID(card));
3307	return 0;
3308}
3309
3310static int qeth_query_ipassists(struct qeth_card *card,
3311				enum qeth_prot_versions prot)
3312{
3313	int rc;
3314	struct qeth_cmd_buffer *iob;
3315
3316	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3317	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3318	if (!iob)
3319		return -ENOMEM;
3320	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3321	return rc;
3322}
3323
3324static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3325				struct qeth_reply *reply, unsigned long data)
3326{
3327	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3328	struct qeth_query_switch_attributes *attrs;
3329	struct qeth_switch_info *sw_info;
3330
3331	QETH_CARD_TEXT(card, 2, "qswiatcb");
3332	if (qeth_setadpparms_inspect_rc(cmd))
3333		return -EIO;
3334
3335	sw_info = (struct qeth_switch_info *)reply->param;
3336	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3337	sw_info->capabilities = attrs->capabilities;
3338	sw_info->settings = attrs->settings;
3339	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3340			sw_info->settings);
3341	return 0;
3342}
3343
3344int qeth_query_switch_attributes(struct qeth_card *card,
3345				 struct qeth_switch_info *sw_info)
3346{
3347	struct qeth_cmd_buffer *iob;
3348
3349	QETH_CARD_TEXT(card, 2, "qswiattr");
3350	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3351		return -EOPNOTSUPP;
3352	if (!netif_carrier_ok(card->dev))
3353		return -ENOMEDIUM;
3354	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3355	if (!iob)
3356		return -ENOMEM;
3357	return qeth_send_ipa_cmd(card, iob,
3358				qeth_query_switch_attributes_cb, sw_info);
3359}
3360
3361struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3362					  enum qeth_diags_cmds sub_cmd,
3363					  unsigned int data_length)
3364{
3365	struct qeth_ipacmd_diagass *cmd;
3366	struct qeth_cmd_buffer *iob;
3367
3368	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3369				 DIAG_HDR_LEN + data_length);
3370	if (!iob)
3371		return NULL;
3372
3373	cmd = &__ipa_cmd(iob)->data.diagass;
3374	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3375	cmd->subcmd = sub_cmd;
3376	return iob;
3377}
3378EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3379
3380static int qeth_query_setdiagass_cb(struct qeth_card *card,
3381		struct qeth_reply *reply, unsigned long data)
3382{
3383	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3384	u16 rc = cmd->hdr.return_code;
3385
3386	if (rc) {
3387		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3388		return -EIO;
3389	}
3390
3391	card->info.diagass_support = cmd->data.diagass.ext;
3392	return 0;
3393}
3394
3395static int qeth_query_setdiagass(struct qeth_card *card)
3396{
3397	struct qeth_cmd_buffer *iob;
3398
3399	QETH_CARD_TEXT(card, 2, "qdiagass");
3400	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3401	if (!iob)
3402		return -ENOMEM;
3403	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3404}
3405
3406static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3407{
3408	unsigned long info = get_zeroed_page(GFP_KERNEL);
3409	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3410	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3411	struct ccw_dev_id ccwid;
3412	int level;
3413
3414	tid->chpid = card->info.chpid;
3415	ccw_device_get_id(CARD_RDEV(card), &ccwid);
3416	tid->ssid = ccwid.ssid;
3417	tid->devno = ccwid.devno;
3418	if (!info)
3419		return;
3420	level = stsi(NULL, 0, 0, 0);
3421	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3422		tid->lparnr = info222->lpar_number;
3423	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3424		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3425		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3426	}
3427	free_page(info);
3429}
3430
3431static int qeth_hw_trap_cb(struct qeth_card *card,
3432		struct qeth_reply *reply, unsigned long data)
3433{
3434	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3435	u16 rc = cmd->hdr.return_code;
3436
3437	if (rc) {
3438		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3439		return -EIO;
3440	}
3441	return 0;
3442}
3443
3444int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3445{
3446	struct qeth_cmd_buffer *iob;
3447	struct qeth_ipa_cmd *cmd;
3448
3449	QETH_CARD_TEXT(card, 2, "diagtrap");
3450	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3451	if (!iob)
3452		return -ENOMEM;
3453	cmd = __ipa_cmd(iob);
3454	cmd->data.diagass.type = 1;
3455	cmd->data.diagass.action = action;
3456	switch (action) {
3457	case QETH_DIAGS_TRAP_ARM:
3458		cmd->data.diagass.options = 0x0003;
3459		cmd->data.diagass.ext = 0x00010000 +
3460			sizeof(struct qeth_trap_id);
3461		qeth_get_trap_id(card,
3462			(struct qeth_trap_id *)cmd->data.diagass.cdata);
3463		break;
3464	case QETH_DIAGS_TRAP_DISARM:
3465		cmd->data.diagass.options = 0x0001;
3466		break;
3467	case QETH_DIAGS_TRAP_CAPTURE:
3468		break;
3469	}
3470	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3471}
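
/*
 * Editor's sketch of the intended call pattern (hypothetical caller,
 * kept disabled): arm the hardware trap while the device is online,
 * disarm it again before going offline.
 */
#if 0
	if (qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))
		dev_warn(&card->gdev->dev, "Arming the HW trap failed\n");
	/* ... device is operational ... */
	qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
#endif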
3472
3473static int qeth_check_qdio_errors(struct qeth_card *card,
3474				  struct qdio_buffer *buf,
3475				  unsigned int qdio_error,
3476				  const char *dbftext)
3477{
3478	if (qdio_error) {
3479		QETH_CARD_TEXT(card, 2, dbftext);
3480		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3481			       buf->element[15].sflags);
3482		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3483			       buf->element[14].sflags);
3484		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3485		if (buf->element[15].sflags == 0x12) {
3486			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3487			return 0;
3488		}
3489		return 1;
3490	}
3491	return 0;
3492}
3493
3494static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3495					 unsigned int count)
3496{
3497	struct qeth_qdio_q *queue = card->qdio.in_q;
3498	struct list_head *lh;
3499	int i;
3500	int rc;
3501	int newcount = 0;
3502
3503	/* only requeue at a certain threshold to avoid SIGAs */
3504	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3505		for (i = queue->next_buf_to_init;
3506		     i < queue->next_buf_to_init + count; ++i) {
3507			if (qeth_init_input_buffer(card,
3508				&queue->bufs[QDIO_BUFNR(i)])) {
3509				break;
3510			} else {
3511				newcount++;
3512			}
3513		}
3514
3515		if (newcount < count) {
3516			/* we are in memory shortage so we switch back to
3517			 * traditional skb allocation and drop packets */
3518			atomic_set(&card->force_alloc_skb, 3);
3519			count = newcount;
3520		} else {
3521			atomic_add_unless(&card->force_alloc_skb, -1, 0);
3522		}
3523
3524		if (!count) {
3525			i = 0;
3526			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3527				i++;
3528			if (i == card->qdio.in_buf_pool.buf_count) {
3529				QETH_CARD_TEXT(card, 2, "qsarbw");
3530				schedule_delayed_work(
3531					&card->buffer_reclaim_work,
3532					QETH_RECLAIM_WORK_TIME);
3533			}
3534			return 0;
3535		}
3536
3537		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
3538			     queue->next_buf_to_init, count);
3539		if (rc)
3540			QETH_CARD_TEXT(card, 2, "qinberr");
3542		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3543						     count);
3544		return count;
3545	}
3546
3547	return 0;
3548}
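
/*
 * Editor's note: refilling is deliberately batched - buffers are only
 * handed back to the hardware once QETH_IN_BUF_REQUEUE_THRESHOLD() of
 * them have accumulated, so a single do_QDIO() SIGA covers many
 * buffers. When the pool is exhausted, buffer_reclaim_work below
 * kicks NAPI to retry later.
 */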
3549
3550static void qeth_buffer_reclaim_work(struct work_struct *work)
3551{
3552	struct qeth_card *card = container_of(work, struct qeth_card,
3553		buffer_reclaim_work.work);
3554
3555	local_bh_disable();
3556	napi_schedule(&card->napi);
3557	/* kick-start the NAPI softirq: */
3558	local_bh_enable();
3559}
3560
3561static void qeth_handle_send_error(struct qeth_card *card,
3562		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3563{
3564	int sbalf15 = buffer->buffer->element[15].sflags;
3565
3566	QETH_CARD_TEXT(card, 6, "hdsnderr");
3567	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3568
3569	if (!qdio_err)
3570		return;
3571
3572	if ((sbalf15 >= 15) && (sbalf15 <= 31))
3573		return;
3574
3575	QETH_CARD_TEXT(card, 1, "lnkfail");
3576	QETH_CARD_TEXT_(card, 1, "%04x %02x",
3577		       (u16)qdio_err, (u8)sbalf15);
3578}
3579
3580/**
3581 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3582 * @queue: queue to check for packing buffer
3583 *
3584 * Returns number of buffers that were prepared for flush.
3585 */
3586static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3587{
3588	struct qeth_qdio_out_buffer *buffer;
3589
3590	buffer = queue->bufs[queue->next_buf_to_fill];
3591	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3592	    (buffer->next_element_to_fill > 0)) {
3593		/* it's a packing buffer */
3594		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3595		queue->next_buf_to_fill =
3596			QDIO_BUFNR(queue->next_buf_to_fill + 1);
3597		return 1;
3598	}
3599	return 0;
3600}
3601
3602/*
3603 * Switches to packing state if the number of used buffers on a queue
3604 * reaches a certain limit.
3605 */
3606static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3607{
3608	if (!queue->do_pack) {
3609		if (atomic_read(&queue->used_buffers)
3610		    >= QETH_HIGH_WATERMARK_PACK){
3611			/* switch non-PACKING -> PACKING */
3612			QETH_CARD_TEXT(queue->card, 6, "np->pack");
3613			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3614			queue->do_pack = 1;
3615		}
3616	}
3617}
3618
3619/*
3620 * Switches from packing to non-packing mode. If there is a packing
3621 * buffer on the queue this buffer will be prepared to be flushed.
3622 * In that case 1 is returned to inform the caller. If no buffer
3623 * has to be flushed, zero is returned.
3624 */
3625static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3626{
3627	if (queue->do_pack) {
3628		if (atomic_read(&queue->used_buffers)
3629		    <= QETH_LOW_WATERMARK_PACK) {
3630			/* switch PACKING -> non-PACKING */
3631			QETH_CARD_TEXT(queue->card, 6, "pack->np");
3632			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3633			queue->do_pack = 0;
3634			return qeth_prep_flush_pack_buffer(queue);
3635		}
3636	}
3637	return 0;
3638}
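
/*
 * Editor's note: the two helpers above form a hysteresis - packing
 * starts once used_buffers >= QETH_HIGH_WATERMARK_PACK and stops only
 * after it has dropped to <= QETH_LOW_WATERMARK_PACK, so a queue that
 * hovers around one threshold does not flip modes on every packet.
 */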
3639
3640static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3641			       int count)
3642{
3643	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3644	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3645	struct qeth_card *card = queue->card;
3646	int rc;
3647	int i;
3648
3649	for (i = index; i < index + count; ++i) {
3650		unsigned int bidx = QDIO_BUFNR(i);
3651		struct sk_buff *skb;
3652
3653		buf = queue->bufs[bidx];
3654		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3655				SBAL_EFLAGS_LAST_ENTRY;
3656		queue->coalesced_frames += buf->frames;
3657
3658		if (queue->bufstates)
3659			queue->bufstates[bidx].user = buf;
3660
3661		if (IS_IQD(card)) {
3662			skb_queue_walk(&buf->skb_list, skb)
3663				skb_tx_timestamp(skb);
3664		}
3665	}
3666
3667	if (!IS_IQD(card)) {
3668		if (!queue->do_pack) {
3669			if ((atomic_read(&queue->used_buffers) >=
3670				(QETH_HIGH_WATERMARK_PACK -
3671				 QETH_WATERMARK_PACK_FUZZ)) &&
3672			    !atomic_read(&queue->set_pci_flags_count)) {
3673				/* it's likely that we'll go to packing
3674				 * mode soon */
3675				atomic_inc(&queue->set_pci_flags_count);
3676				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3677			}
3678		} else {
3679			if (!atomic_read(&queue->set_pci_flags_count)) {
3680				/*
3681				 * there's no outstanding PCI any more, so we
3682				 * have to request a PCI to be sure that the PCI
3683				 * will wake at some time in the future; then we
3684				 * can flush packed buffers that might still be
3685				 * hanging around, which can happen if no
3686				 * further send was requested by the stack
3687				 */
3688				atomic_inc(&queue->set_pci_flags_count);
3689				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3690			}
3691		}
3692
3693		if (atomic_read(&queue->set_pci_flags_count))
3694			qdio_flags |= QDIO_FLAG_PCI_OUT;
3695	}
3696
3697	QETH_TXQ_STAT_INC(queue, doorbell);
3698	rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3699		     queue->queue_no, index, count);
3700
3701	/* Fake the TX completion interrupt: */
3702	if (IS_IQD(card)) {
3703		unsigned int frames = READ_ONCE(queue->max_coalesced_frames);
3704		unsigned int usecs = READ_ONCE(queue->coalesce_usecs);
3705
3706		if (frames && queue->coalesced_frames >= frames) {
3707			napi_schedule(&queue->napi);
3708			queue->coalesced_frames = 0;
3709			QETH_TXQ_STAT_INC(queue, coal_frames);
3710		} else if (usecs) {
3711			qeth_tx_arm_timer(queue, usecs);
3712		}
3713	}
3714
3715	if (rc) {
3716		/* ignore temporary SIGA errors without busy condition */
3717		if (rc == -ENOBUFS)
3718			return;
3719		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3720		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3721		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3722		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3723		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3724
3725		/* this must not happen under normal circumstances. if it
3726		 * happens something is really wrong -> recover */
3727		qeth_schedule_recovery(queue->card);
3728		return;
3729	}
3730}
3731
3732static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3733{
3734	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3735
3736	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3737	queue->prev_hdr = NULL;
3738	queue->bulk_count = 0;
3739}
3740
3741static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3742{
3743	int index;
3744	int flush_cnt = 0;
3745	int q_was_packing = 0;
3746
3747	/*
3748	 * check if we have to switch to non-packing mode or if
3749	 * we have to get a pci flag out on the queue
3750	 */
3751	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3752	    !atomic_read(&queue->set_pci_flags_count)) {
3753		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3754				QETH_OUT_Q_UNLOCKED) {
3755			/*
3756			 * If we get in here, there was no action in
3757			 * do_send_packet. So, we check if there is a
3758			 * packing buffer to be flushed here.
3759			 */
3760			index = queue->next_buf_to_fill;
3761			q_was_packing = queue->do_pack;
3762			/* queue->do_pack may change */
3763			barrier();
3764			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3765			if (!flush_cnt &&
3766			    !atomic_read(&queue->set_pci_flags_count))
3767				flush_cnt += qeth_prep_flush_pack_buffer(queue);
3768			if (q_was_packing)
3769				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3770			if (flush_cnt)
3771				qeth_flush_buffers(queue, index, flush_cnt);
3772			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3773		}
3774	}
3775}
3776
3777static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3778{
3779	struct qeth_card *card = (struct qeth_card *)card_ptr;
3780
3781	napi_schedule_irqoff(&card->napi);
3782}
3783
3784int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3785{
3786	int rc;
3787
3788	if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
3789		rc = -1;
3790		goto out;
3791	} else {
3792		if (card->options.cq == cq) {
3793			rc = 0;
3794			goto out;
3795		}
3796
3797		qeth_free_qdio_queues(card);
3798		card->options.cq = cq;
3799		rc = 0;
3800	}
3801out:
3802	return rc;
3804}
3805EXPORT_SYMBOL_GPL(qeth_configure_cq);
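
/*
 * A minimal usage sketch (assumed caller context, not driver code):
 * since this tears down the QDIO queues, it is only safe while the
 * card is offline; the queues are rebuilt by the next
 * qeth_qdio_establish().
 */
#if 0
	if (qeth_configure_cq(card, QETH_CQ_ENABLED) < 0)
		dev_warn(&card->gdev->dev, "completion queue unavailable\n");
#endif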
3806
3807static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3808				 unsigned int queue, int first_element,
3809				 int count)
3810{
3811	struct qeth_qdio_q *cq = card->qdio.c_q;
3812	int i;
3813	int rc;
3814
3815	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3816	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3817	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3818
3819	if (qdio_err) {
3820		netif_tx_stop_all_queues(card->dev);
3821		qeth_schedule_recovery(card);
3822		return;
3823	}
3824
3825	for (i = first_element; i < first_element + count; ++i) {
3826		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3827		int e = 0;
3828
3829		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3830		       buffer->element[e].addr) {
3831			unsigned long phys_aob_addr = buffer->element[e].addr;
3832
3833			qeth_qdio_handle_aob(card, phys_aob_addr);
3834			++e;
3835		}
3836		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3837	}
3838	rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3839		    card->qdio.c_q->next_buf_to_init,
3840		    count);
3841	if (rc) {
3842		dev_warn(&card->gdev->dev,
3843			"QDIO reported an error, rc=%i\n", rc);
3844		QETH_CARD_TEXT(card, 2, "qcqherr");
3845	}
3846
3847	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3848}
3849
3850static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3851				    unsigned int qdio_err, int queue,
3852				    int first_elem, int count,
3853				    unsigned long card_ptr)
3854{
3855	struct qeth_card *card = (struct qeth_card *)card_ptr;
3856
3857	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3858	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3859
3860	if (qdio_err)
3861		qeth_schedule_recovery(card);
3862}
3863
3864static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3865				     unsigned int qdio_error, int __queue,
3866				     int first_element, int count,
3867				     unsigned long card_ptr)
3868{
3869	struct qeth_card *card        = (struct qeth_card *) card_ptr;
3870	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3871	struct net_device *dev = card->dev;
3872	struct netdev_queue *txq;
3873	int i;
3874
3875	QETH_CARD_TEXT(card, 6, "qdouhdl");
3876	if (qdio_error & QDIO_ERROR_FATAL) {
3877		QETH_CARD_TEXT(card, 2, "achkcond");
3878		netif_tx_stop_all_queues(dev);
3879		qeth_schedule_recovery(card);
3880		return;
3881	}
3882
3883	for (i = first_element; i < (first_element + count); ++i) {
3884		struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)];
3885
3886		qeth_handle_send_error(card, buf, qdio_error);
3887		qeth_clear_output_buffer(queue, buf, qdio_error, 0);
3888	}
3889
3890	atomic_sub(count, &queue->used_buffers);
3891	qeth_check_outbound_queue(queue);
3892
3893	txq = netdev_get_tx_queue(dev, __queue);
3894	/* xmit may have observed the full condition, but not yet stopped the
3895	 * txq. In that case the code below won't trigger. So before returning,
3896	 * xmit will re-check the txq's fill level and wake it up if needed.
3897	 */
3898	if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3899		netif_tx_wake_queue(txq);
3900}
3901
3902/**
 * qeth_get_priority_queue() - select the outbound queue for an skb
 * @card: qeth_card the skb will be sent on
 * @skb: skb to classify
 *
3903 * Note: Function assumes that we have 4 outbound queues.
3904 */
3905int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3906{
3907	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3908	u8 tos;
3909
3910	switch (card->qdio.do_prio_queueing) {
3911	case QETH_PRIO_Q_ING_TOS:
3912	case QETH_PRIO_Q_ING_PREC:
3913		switch (qeth_get_ip_version(skb)) {
3914		case 4:
3915			tos = ipv4_get_dsfield(ip_hdr(skb));
3916			break;
3917		case 6:
3918			tos = ipv6_get_dsfield(ipv6_hdr(skb));
3919			break;
3920		default:
3921			return card->qdio.default_out_queue;
3922		}
3923		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3924			return ~tos >> 6 & 3;
3925		if (tos & IPTOS_MINCOST)
3926			return 3;
3927		if (tos & IPTOS_RELIABILITY)
3928			return 2;
3929		if (tos & IPTOS_THROUGHPUT)
3930			return 1;
3931		if (tos & IPTOS_LOWDELAY)
3932			return 0;
3933		break;
3934	case QETH_PRIO_Q_ING_SKB:
3935		if (skb->priority > 5)
3936			return 0;
3937		return ~skb->priority >> 1 & 3;
3938	case QETH_PRIO_Q_ING_VLAN:
3939		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3940			return ~ntohs(veth->h_vlan_TCI) >>
3941			       (VLAN_PRIO_SHIFT + 1) & 3;
3942		break;
3943	case QETH_PRIO_Q_ING_FIXED:
3944		return card->qdio.default_out_queue;
3945	default:
3946		break;
3947	}
3948	return card->qdio.default_out_queue;
3949}
3950EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
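
/*
 * Worked example for the QETH_PRIO_Q_ING_PREC mapping above: the two
 * top-most bits of the TOS byte (the high bits of the IP precedence
 * field) select the queue, inverted so that higher precedence lands on
 * a lower (more preferred) queue number:
 *
 *	tos 0x00 (precedence 0): ~0x00 >> 6 & 3 == 3
 *	tos 0x40 (precedence 2): ~0x40 >> 6 & 3 == 2
 *	tos 0x80 (precedence 4): ~0x80 >> 6 & 3 == 1
 *	tos 0xC0 (precedence 6): ~0xC0 >> 6 & 3 == 0
 */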
3951
3952/**
3953 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
3954 * @skb:				SKB address
3955 *
3956 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3957 * the fragmented part of the SKB. Returns zero for a linear SKB.
3958 */
3959static int qeth_get_elements_for_frags(struct sk_buff *skb)
3960{
3961	int cnt, elements = 0;
3962
3963	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3964		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3965
3966		elements += qeth_get_elements_for_range(
3967			(addr_t)skb_frag_address(frag),
3968			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3969	}
3970	return elements;
3971}
3972
3973/**
3974 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
3975 *				to transmit an skb.
3976 * @skb:			the skb to operate on.
3977 * @data_offset:		skip this part of the skb's linear data
3978 *
3979 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3980 * skb's data (both its linear part and paged fragments).
3981 */
3982unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3983{
3984	unsigned int elements = qeth_get_elements_for_frags(skb);
3985	addr_t end = (addr_t)skb->data + skb_headlen(skb);
3986	addr_t start = (addr_t)skb->data + data_offset;
3987
3988	if (start != end)
3989		elements += qeth_get_elements_for_range(start, end);
3990	return elements;
3991}
3992EXPORT_SYMBOL_GPL(qeth_count_elements);
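
/*
 * Worked example (assuming 4K pages): 3000 bytes of linear data that
 * start 512 bytes before a page boundary straddle two pages and thus
 * need two buffer elements, while the same 3000 bytes starting at a
 * page-aligned address fit into one. This is why qeth_add_hw_header()
 * below cares about page-boundary layout.
 */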
3993
3994#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
3995					 MAX_TCP_HEADER)
3996
3997/**
3998 * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be sent on.
3999 * @skb: skb that the HW header should be added to.
4000 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
4001 *	 it contains a valid pointer to a qeth_hdr.
4002 * @hdr_len: length of the HW header.
4003 * @proto_len: length of protocol headers that need to be in same page as the
4004 *	       HW header.
 * @elements: returns the number of buffer elements needed for this skb.
4005 *
4006 * Returns the pushed length. If the header can't be pushed on
4007 * (e.g. because it would cross a page boundary), it is allocated from
4008 * the cache instead and 0 is returned.
4009 * The number of needed buffer elements is returned in @elements.
4010 * A negative return value indicates that the header could not be created.
4011 */
4012static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
4013			      struct sk_buff *skb, struct qeth_hdr **hdr,
4014			      unsigned int hdr_len, unsigned int proto_len,
4015			      unsigned int *elements)
4016{
4017	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
4018	const unsigned int contiguous = proto_len ? proto_len : 1;
4019	const unsigned int max_elements = queue->max_elements;
4020	unsigned int __elements;
4021	addr_t start, end;
4022	bool push_ok;
4023	int rc;
4024
4025check_layout:
4026	start = (addr_t)skb->data - hdr_len;
4027	end = (addr_t)skb->data;
4028
4029	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
4030		/* Push HW header into same page as first protocol header. */
4031		push_ok = true;
4032		/* ... but TSO always needs a separate element for headers: */
4033		if (skb_is_gso(skb))
4034			__elements = 1 + qeth_count_elements(skb, proto_len);
4035		else
4036			__elements = qeth_count_elements(skb, 0);
4037	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
4038		/* Push HW header into preceding page, flush with skb->data. */
4039		push_ok = true;
4040		__elements = 1 + qeth_count_elements(skb, 0);
4041	} else {
4042		/* Use header cache, copy protocol headers up. */
4043		push_ok = false;
4044		__elements = 1 + qeth_count_elements(skb, proto_len);
4045	}
4046
4047	/* Compress skb to fit into one IO buffer: */
4048	if (__elements > max_elements) {
4049		if (!skb_is_nonlinear(skb)) {
4050			/* Drop it, no easy way of shrinking it further. */
4051			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
4052					 max_elements, __elements, skb->len);
4053			return -E2BIG;
4054		}
4055
4056		rc = skb_linearize(skb);
4057		if (rc) {
4058			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
4059			return rc;
4060		}
4061
4062		QETH_TXQ_STAT_INC(queue, skbs_linearized);
4063		/* Linearization changed the layout, re-evaluate: */
4064		goto check_layout;
4065	}
4066
4067	*elements = __elements;
4068	/* Add the header: */
4069	if (push_ok) {
4070		*hdr = skb_push(skb, hdr_len);
4071		return hdr_len;
4072	}
4073
4074	/* Fall back to cache element with known-good alignment: */
4075	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
4076		return -E2BIG;
4077	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
4078	if (!*hdr)
4079		return -ENOMEM;
4080	/* Copy protocol headers behind HW header: */
4081	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
4082	return 0;
4083}
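
/*
 * Minimal sketch of the caller contract (see qeth_xmit() for the real
 * usage): a return of 0 means the header lives in a cache object rather
 * than inside the skb, so on a later TX error the caller must free it
 * back to qeth_core_header_cache.
 */
#if 0
	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len,
				      proto_len, &elements);
	if (push_len < 0)
		return push_len;	/* -E2BIG or -ENOMEM */
	if (rc && !push_len)		/* TX failed, header from cache: */
		kmem_cache_free(qeth_core_header_cache, hdr);
#endif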
4084
4085static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
4086			      struct sk_buff *curr_skb,
4087			      struct qeth_hdr *curr_hdr)
4088{
4089	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4090	struct qeth_hdr *prev_hdr = queue->prev_hdr;
4091
4092	if (!prev_hdr)
4093		return true;
4094
4095	/* All packets must have the same target: */
4096	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4097		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4098
4099		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4100					eth_hdr(curr_skb)->h_dest) &&
4101		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4102	}
4103
4104	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4105	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4106}
4107
4108/**
4109 * qeth_fill_buffer() - map skb into an output buffer
4110 * @buf:	buffer to transport the skb
4111 * @skb:	skb to map into the buffer
4112 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
4113 *		from qeth_core_header_cache.
4114 * @offset:	when mapping the skb, start at skb->data + offset
4115 * @hd_len:	if > 0, build a dedicated header element of this size
4116 */
4117static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4118				     struct sk_buff *skb, struct qeth_hdr *hdr,
4119				     unsigned int offset, unsigned int hd_len)
4120{
4121	struct qdio_buffer *buffer = buf->buffer;
4122	int element = buf->next_element_to_fill;
4123	int length = skb_headlen(skb) - offset;
4124	char *data = skb->data + offset;
4125	unsigned int elem_length, cnt;
4126	bool is_first_elem = true;
4127
4128	__skb_queue_tail(&buf->skb_list, skb);
4129
4130	/* build dedicated element for HW Header */
4131	if (hd_len) {
4132		is_first_elem = false;
4133
4134		buffer->element[element].addr = virt_to_phys(hdr);
4135		buffer->element[element].length = hd_len;
4136		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4137
4138		/* HW header is allocated from cache: */
4139		if ((void *)hdr != skb->data)
4140			buf->is_header[element] = 1;
4141		/* HW header was pushed and is contiguous with linear part: */
4142		else if (length > 0 && !PAGE_ALIGNED(data) &&
4143			 (data == (char *)hdr + hd_len))
4144			buffer->element[element].eflags |=
4145				SBAL_EFLAGS_CONTIGUOUS;
4146
4147		element++;
4148	}
4149
4150	/* map linear part into buffer element(s) */
4151	while (length > 0) {
4152		elem_length = min_t(unsigned int, length,
4153				    PAGE_SIZE - offset_in_page(data));
4154
4155		buffer->element[element].addr = virt_to_phys(data);
4156		buffer->element[element].length = elem_length;
4157		length -= elem_length;
4158		if (is_first_elem) {
4159			is_first_elem = false;
4160			if (length || skb_is_nonlinear(skb))
4161				/* skb needs additional elements */
4162				buffer->element[element].eflags =
4163					SBAL_EFLAGS_FIRST_FRAG;
4164			else
4165				buffer->element[element].eflags = 0;
4166		} else {
4167			buffer->element[element].eflags =
4168				SBAL_EFLAGS_MIDDLE_FRAG;
4169		}
4170
4171		data += elem_length;
4172		element++;
4173	}
4174
4175	/* map page frags into buffer element(s) */
4176	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4177		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4178
4179		data = skb_frag_address(frag);
4180		length = skb_frag_size(frag);
4181		while (length > 0) {
4182			elem_length = min_t(unsigned int, length,
4183					    PAGE_SIZE - offset_in_page(data));
4184
4185			buffer->element[element].addr = virt_to_phys(data);
4186			buffer->element[element].length = elem_length;
4187			buffer->element[element].eflags =
4188				SBAL_EFLAGS_MIDDLE_FRAG;
4189
4190			length -= elem_length;
4191			data += elem_length;
4192			element++;
4193		}
4194	}
4195
4196	if (buffer->element[element - 1].eflags)
4197		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4198	buf->next_element_to_fill = element;
4199	return element;
4200}
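
/*
 * Worked example for qeth_fill_buffer(): an skb with a cache-allocated
 * HW header, a sub-page stretch of linear data and two single-page
 * fragments maps to four SBAL elements:
 *
 *	element 0: HW header	(FIRST_FRAG, is_header set)
 *	element 1: linear data	(MIDDLE_FRAG)
 *	element 2: frag 0	(MIDDLE_FRAG)
 *	element 3: frag 1	(rewritten to LAST_FRAG at the end)
 */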
4201
4202static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4203		       struct sk_buff *skb, unsigned int elements,
4204		       struct qeth_hdr *hdr, unsigned int offset,
4205		       unsigned int hd_len)
4206{
4207	unsigned int bytes = qdisc_pkt_len(skb);
4208	struct qeth_qdio_out_buffer *buffer;
4209	unsigned int next_element;
4210	struct netdev_queue *txq;
4211	bool stopped = false;
4212	bool flush;
4213
4214	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4215	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4216
4217	/* Just a sanity check, the wake/stop logic should ensure that we always
4218	 * get a free buffer.
4219	 */
4220	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4221		return -EBUSY;
4222
4223	flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4224
4225	if (flush ||
4226	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
4227		if (buffer->next_element_to_fill > 0) {
4228			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4229			queue->bulk_count++;
4230		}
4231
4232		if (queue->bulk_count >= queue->bulk_max)
4233			flush = true;
4234
4235		if (flush)
4236			qeth_flush_queue(queue);
4237
4238		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4239						queue->bulk_count)];
4240
4241		/* Sanity-check again: */
4242		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4243			return -EBUSY;
4244	}
4245
4246	if (buffer->next_element_to_fill == 0 &&
4247	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4248		/* If a TX completion happens right _here_ and fails to wake
4249		 * the txq, then our re-check below will catch the race.
4250		 */
4251		QETH_TXQ_STAT_INC(queue, stopped);
4252		netif_tx_stop_queue(txq);
4253		stopped = true;
4254	}
4255
4256	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4257	buffer->bytes += bytes;
4258	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4259	queue->prev_hdr = hdr;
4260
4261	flush = __netdev_tx_sent_queue(txq, bytes,
4262				       !stopped && netdev_xmit_more());
4263
4264	if (flush || next_element >= queue->max_elements) {
4265		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4266		queue->bulk_count++;
4267
4268		if (queue->bulk_count >= queue->bulk_max)
4269			flush = true;
4270
4271		if (flush)
4272			qeth_flush_queue(queue);
4273	}
4274
4275	if (stopped && !qeth_out_queue_is_full(queue))
4276		netif_tx_start_queue(txq);
4277	return 0;
4278}
4279
4280int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4281			struct sk_buff *skb, struct qeth_hdr *hdr,
4282			unsigned int offset, unsigned int hd_len,
4283			int elements_needed)
4284{
4285	struct qeth_qdio_out_buffer *buffer;
4286	unsigned int next_element;
4287	struct netdev_queue *txq;
4288	bool stopped = false;
4289	int start_index;
4290	int flush_count = 0;
4291	int do_pack = 0;
4292	int tmp;
4293	int rc = 0;
4294
4295	/* spin until we get the queue ... */
4296	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
4297			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
4298	start_index = queue->next_buf_to_fill;
4299	buffer = queue->bufs[queue->next_buf_to_fill];
4300
4301	/* Just a sanity check, the wake/stop logic should ensure that we always
4302	 * get a free buffer.
4303	 */
4304	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
4305		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
4306		return -EBUSY;
4307	}
4308
4309	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4310
4311	/* check if we need to switch packing state of this queue */
4312	qeth_switch_to_packing_if_needed(queue);
4313	if (queue->do_pack) {
4314		do_pack = 1;
4315		/* does packet fit in current buffer? */
4316		if (buffer->next_element_to_fill + elements_needed >
4317		    queue->max_elements) {
4318			/* ... no -> set state PRIMED */
4319			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4320			flush_count++;
4321			queue->next_buf_to_fill =
4322				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4323			buffer = queue->bufs[queue->next_buf_to_fill];
4324
4325			/* We stepped forward, so sanity-check again: */
4326			if (atomic_read(&buffer->state) !=
4327			    QETH_QDIO_BUF_EMPTY) {
4328				qeth_flush_buffers(queue, start_index,
4329							   flush_count);
4330				atomic_set(&queue->state,
4331						QETH_OUT_Q_UNLOCKED);
4332				rc = -EBUSY;
4333				goto out;
4334			}
4335		}
4336	}
4337
4338	if (buffer->next_element_to_fill == 0 &&
4339	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4340		/* If a TX completion happens right _here_ and fails to wake
4341		 * the txq, then our re-check below will catch the race.
4342		 */
4343		QETH_TXQ_STAT_INC(queue, stopped);
4344		netif_tx_stop_queue(txq);
4345		stopped = true;
4346	}
4347
4348	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4349	buffer->bytes += qdisc_pkt_len(skb);
4350	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4351
4352	if (queue->do_pack)
4353		QETH_TXQ_STAT_INC(queue, skbs_pack);
4354	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4355		flush_count++;
4356		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4357		queue->next_buf_to_fill =
4358				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4359	}
4360
4361	if (flush_count)
4362		qeth_flush_buffers(queue, start_index, flush_count);
4363	else if (!atomic_read(&queue->set_pci_flags_count))
4364		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
4365	/*
4366	 * queue->state will go from LOCKED -> UNLOCKED, or from
4367	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
4368	 * (switch packing state or flush buffer to get another PCI flag out).
4369	 * In that case we will enter this loop.
4370	 */
4371	while (atomic_dec_return(&queue->state)) {
4372		start_index = queue->next_buf_to_fill;
4373		/* check if we can go back to non-packing state */
4374		tmp = qeth_switch_to_nonpacking_if_needed(queue);
4375		/*
4376		 * check if we need to flush a packing buffer to get a pci
4377		 * flag out on the queue
4378		 */
4379		if (!tmp && !atomic_read(&queue->set_pci_flags_count))
4380			tmp = qeth_prep_flush_pack_buffer(queue);
4381		if (tmp) {
4382			qeth_flush_buffers(queue, start_index, tmp);
4383			flush_count += tmp;
4384		}
4385	}
4386out:
4387	/* at this point the queue is UNLOCKED again */
4388	if (do_pack)
4389		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4390
4391	if (stopped && !qeth_out_queue_is_full(queue))
4392		netif_tx_start_queue(txq);
4393	return rc;
4394}
4395EXPORT_SYMBOL_GPL(qeth_do_send_packet);
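
/*
 * Illustrative summary of the locking above, assuming the natural enum
 * ordering QETH_OUT_Q_UNLOCKED < LOCKED < LOCKED_FLUSH: queue->state
 * doubles as a lock and a "flush requested" flag. The xmit path takes
 * it with cmpxchg(UNLOCKED -> LOCKED); bumping it to LOCKED_FLUSH
 * records a deferred flush request; and the atomic_dec_return() loop
 * releases one level per iteration, re-running the flush checks until
 * the queue is UNLOCKED again.
 */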
4396
4397static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4398			      unsigned int payload_len, struct sk_buff *skb,
4399			      unsigned int proto_len)
4400{
4401	struct qeth_hdr_ext_tso *ext = &hdr->ext;
4402
4403	ext->hdr_tot_len = sizeof(*ext);
4404	ext->imb_hdr_no = 1;
4405	ext->hdr_type = 1;
4406	ext->hdr_version = 1;
4407	ext->hdr_len = 28;
4408	ext->payload_len = payload_len;
4409	ext->mss = skb_shinfo(skb)->gso_size;
4410	ext->dg_hdr_len = proto_len;
4411}
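
/*
 * Worked example for the TSO extension: with an mss of 1400 and a
 * 54-byte protocol header (Ethernet + IPv4 + TCP, passed in as
 * proto_len), a payload_len of 14000 tells the card to cut the payload
 * into ten 1400-byte segments, replicating the dg_hdr_len bytes of
 * headers in front of each one.
 */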
4412
4413int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4414	      struct qeth_qdio_out_q *queue, int ipv,
4415	      void (*fill_header)(struct qeth_qdio_out_q *queue,
4416				  struct qeth_hdr *hdr, struct sk_buff *skb,
4417				  int ipv, unsigned int data_len))
4418{
4419	unsigned int proto_len, hw_hdr_len;
4420	unsigned int frame_len = skb->len;
4421	bool is_tso = skb_is_gso(skb);
4422	unsigned int data_offset = 0;
4423	struct qeth_hdr *hdr = NULL;
4424	unsigned int hd_len = 0;
4425	unsigned int elements;
4426	int push_len, rc;
4427
4428	if (is_tso) {
4429		hw_hdr_len = sizeof(struct qeth_hdr_tso);
4430		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4431	} else {
4432		hw_hdr_len = sizeof(struct qeth_hdr);
4433		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4434	}
4435
4436	rc = skb_cow_head(skb, hw_hdr_len);
4437	if (rc)
4438		return rc;
4439
4440	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4441				      &elements);
4442	if (push_len < 0)
4443		return push_len;
4444	if (is_tso || !push_len) {
4445		/* HW header needs its own buffer element. */
4446		hd_len = hw_hdr_len + proto_len;
4447		data_offset = push_len + proto_len;
4448	}
4449	memset(hdr, 0, hw_hdr_len);
4450	fill_header(queue, hdr, skb, ipv, frame_len);
4451	if (is_tso)
4452		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4453				  frame_len - proto_len, skb, proto_len);
4454
4455	if (IS_IQD(card)) {
4456		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4457				 hd_len);
4458	} else {
4459		/* TODO: drop skb_orphan() once TX completion is fast enough */
4460		skb_orphan(skb);
4461		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4462					 hd_len, elements);
4463	}
4464
4465	if (rc && !push_len)
4466		kmem_cache_free(qeth_core_header_cache, hdr);
4467
4468	return rc;
4469}
4470EXPORT_SYMBOL_GPL(qeth_xmit);
4471
4472static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4473		struct qeth_reply *reply, unsigned long data)
4474{
4475	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4476	struct qeth_ipacmd_setadpparms *setparms;
4477
4478	QETH_CARD_TEXT(card, 4, "prmadpcb");
4479
4480	setparms = &(cmd->data.setadapterparms);
4481	if (qeth_setadpparms_inspect_rc(cmd)) {
4482		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4483		setparms->data.mode = SET_PROMISC_MODE_OFF;
4484	}
4485	card->info.promisc_mode = setparms->data.mode;
4486	return (cmd->hdr.return_code) ? -EIO : 0;
4487}
4488
4489void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4490{
4491	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4492						    SET_PROMISC_MODE_OFF;
4493	struct qeth_cmd_buffer *iob;
4494	struct qeth_ipa_cmd *cmd;
4495
4496	QETH_CARD_TEXT(card, 4, "setprom");
4497	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4498
4499	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4500				   SETADP_DATA_SIZEOF(mode));
4501	if (!iob)
4502		return;
4503	cmd = __ipa_cmd(iob);
4504	cmd->data.setadapterparms.data.mode = mode;
4505	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4506}
4507EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4508
4509static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4510		struct qeth_reply *reply, unsigned long data)
4511{
4512	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4513	struct qeth_ipacmd_setadpparms *adp_cmd;
4514
4515	QETH_CARD_TEXT(card, 4, "chgmaccb");
4516	if (qeth_setadpparms_inspect_rc(cmd))
4517		return -EIO;
4518
4519	adp_cmd = &cmd->data.setadapterparms;
4520	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4521		return -EADDRNOTAVAIL;
4522
4523	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4524	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4525		return -EADDRNOTAVAIL;
4526
4527	ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4528	return 0;
4529}
4530
4531int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4532{
4533	int rc;
4534	struct qeth_cmd_buffer *iob;
4535	struct qeth_ipa_cmd *cmd;
4536
4537	QETH_CARD_TEXT(card, 4, "chgmac");
4538
4539	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4540				   SETADP_DATA_SIZEOF(change_addr));
4541	if (!iob)
4542		return -ENOMEM;
4543	cmd = __ipa_cmd(iob);
4544	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4545	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4546	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4547			card->dev->dev_addr);
4548	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4549			       NULL);
4550	return rc;
4551}
4552EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4553
4554static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4555		struct qeth_reply *reply, unsigned long data)
4556{
4557	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4558	struct qeth_set_access_ctrl *access_ctrl_req;
4559
4560	QETH_CARD_TEXT(card, 4, "setaccb");
4561
4562	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4563	QETH_CARD_TEXT_(card, 2, "rc=%d",
4564			cmd->data.setadapterparms.hdr.return_code);
4565	if (cmd->data.setadapterparms.hdr.return_code !=
4566						SET_ACCESS_CTRL_RC_SUCCESS)
4567		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4568				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4569				 cmd->data.setadapterparms.hdr.return_code);
4570	switch (qeth_setadpparms_inspect_rc(cmd)) {
4571	case SET_ACCESS_CTRL_RC_SUCCESS:
4572		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4573			dev_info(&card->gdev->dev,
4574			    "QDIO data connection isolation is deactivated\n");
4575		else
4576			dev_info(&card->gdev->dev,
4577			    "QDIO data connection isolation is activated\n");
4578		return 0;
4579	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4580		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4581				 CARD_DEVID(card));
4582		return 0;
4583	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4584		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4585				 CARD_DEVID(card));
4586		return 0;
4587	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4588		dev_err(&card->gdev->dev,
4589			"Adapter does not support QDIO data connection isolation\n");
4590		return -EOPNOTSUPP;
4591	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4592		dev_err(&card->gdev->dev,
4593			"Adapter is dedicated. QDIO data connection isolation not supported\n");
4595		return -EOPNOTSUPP;
4596	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4597		dev_err(&card->gdev->dev,
4598			"TSO does not permit QDIO data connection isolation\n");
4599		return -EPERM;
4600	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4601		dev_err(&card->gdev->dev,
4602			"The adjacent switch port does not support reflective relay mode\n");
4603		return -EOPNOTSUPP;
4604	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4605		dev_err(&card->gdev->dev,
4606			"The reflective relay mode cannot be enabled at the adjacent switch port\n");
4607		return -EREMOTEIO;
4608	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4609		dev_warn(&card->gdev->dev,
4610			"Turning off reflective relay mode at the adjacent switch failed\n");
4611		/* benign error while disabling ISOLATION_MODE_FWD */
4612		return 0;
4613	default:
4614		return -EIO;
4615	}
4616}
4617
4618int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4619				     enum qeth_ipa_isolation_modes mode)
4620{
4621	int rc;
4622	struct qeth_cmd_buffer *iob;
4623	struct qeth_ipa_cmd *cmd;
4624	struct qeth_set_access_ctrl *access_ctrl_req;
4625
4626	QETH_CARD_TEXT(card, 4, "setacctl");
4627
4628	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4629		dev_err(&card->gdev->dev,
4630			"Adapter does not support QDIO data connection isolation\n");
4631		return -EOPNOTSUPP;
4632	}
4633
4634	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4635				   SETADP_DATA_SIZEOF(set_access_ctrl));
4636	if (!iob)
4637		return -ENOMEM;
4638	cmd = __ipa_cmd(iob);
4639	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4640	access_ctrl_req->subcmd_code = mode;
4641
4642	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4643			       NULL);
4644	if (rc) {
4645		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4646		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: send failed\n",
4647				 rc, CARD_DEVID(card));
4648	}
4649
4650	return rc;
4651}
4652
4653void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4654{
4655	struct qeth_card *card;
4656
4657	card = dev->ml_priv;
4658	QETH_CARD_TEXT(card, 4, "txtimeo");
4659	qeth_schedule_recovery(card);
4660}
4661EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4662
4663static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4664{
4665	struct qeth_card *card = dev->ml_priv;
4666	int rc = 0;
4667
4668	switch (regnum) {
4669	case MII_BMCR: /* Basic mode control register */
4670		rc = BMCR_FULLDPLX;
4671		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4672		    (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4673		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4674		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4675			rc |= BMCR_SPEED100;
4676		break;
4677	case MII_BMSR: /* Basic mode status register */
4678		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4679		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4680		     BMSR_100BASE4;
4681		break;
4682	case MII_PHYSID1: /* PHYS ID 1 */
4683		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4684		     dev->dev_addr[2];
4685		rc = (rc >> 5) & 0xFFFF;
4686		break;
4687	case MII_PHYSID2: /* PHYS ID 2 */
4688		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4689		break;
4690	case MII_ADVERTISE: /* Advertisement control reg */
4691		rc = ADVERTISE_ALL;
4692		break;
4693	case MII_LPA: /* Link partner ability reg */
4694		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4695		     LPA_100BASE4 | LPA_LPACK;
4696		break;
4697	case MII_EXPANSION: /* Expansion register */
4698		break;
4699	case MII_DCOUNTER: /* disconnect counter */
4700		break;
4701	case MII_FCSCOUNTER: /* false carrier counter */
4702		break;
4703	case MII_NWAYTEST: /* N-way auto-neg test register */
4704		break;
4705	case MII_RERRCOUNTER: /* rx error counter */
4706		rc = card->stats.rx_length_errors +
4707		     card->stats.rx_frame_errors +
4708		     card->stats.rx_fifo_errors;
4709		break;
4710	case MII_SREVISION: /* silicon revision */
4711		break;
4712	case MII_RESV1: /* reserved 1 */
4713		break;
4714	case MII_LBRERROR: /* loopback, rx, bypass error */
4715		break;
4716	case MII_PHYADDR: /* physical address */
4717		break;
4718	case MII_RESV2: /* reserved 2 */
4719		break;
4720	case MII_TPISTATUS: /* TPI status for 10mbps */
4721		break;
4722	case MII_NCONFIG: /* network interface config */
4723		break;
4724	default:
4725		break;
4726	}
4727	return rc;
4728}
4729
4730static int qeth_snmp_command_cb(struct qeth_card *card,
4731				struct qeth_reply *reply, unsigned long data)
4732{
4733	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4734	struct qeth_arp_query_info *qinfo = reply->param;
4735	struct qeth_ipacmd_setadpparms *adp_cmd;
4736	unsigned int data_len;
4737	void *snmp_data;
4738
4739	QETH_CARD_TEXT(card, 3, "snpcmdcb");
4740
4741	if (cmd->hdr.return_code) {
4742		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4743		return -EIO;
4744	}
4745	if (cmd->data.setadapterparms.hdr.return_code) {
4746		cmd->hdr.return_code =
4747			cmd->data.setadapterparms.hdr.return_code;
4748		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4749		return -EIO;
4750	}
4751
4752	adp_cmd = &cmd->data.setadapterparms;
4753	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4754	if (adp_cmd->hdr.seq_no == 1) {
4755		snmp_data = &adp_cmd->data.snmp;
4756	} else {
4757		snmp_data = &adp_cmd->data.snmp.request;
4758		data_len -= offsetof(struct qeth_snmp_cmd, request);
4759	}
4760
4761	/* check if there is enough room in userspace */
4762	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4763		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4764		return -ENOSPC;
4765	}
4766	QETH_CARD_TEXT_(card, 4, "snore%i",
4767			cmd->data.setadapterparms.hdr.used_total);
4768	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4769			cmd->data.setadapterparms.hdr.seq_no);
4770	/* copy entries to user buffer */
4771	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4772	qinfo->udata_offset += data_len;
4773
4774	if (cmd->data.setadapterparms.hdr.seq_no <
4775	    cmd->data.setadapterparms.hdr.used_total)
4776		return 1;
4777	return 0;
4778}
4779
4780static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4781{
4782	struct qeth_snmp_ureq __user *ureq;
4783	struct qeth_cmd_buffer *iob;
4784	unsigned int req_len;
4785	struct qeth_arp_query_info qinfo = {0, };
4786	int rc = 0;
4787
4788	QETH_CARD_TEXT(card, 3, "snmpcmd");
4789
4790	if (IS_VM_NIC(card))
4791		return -EOPNOTSUPP;
4792
4793	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4794	    IS_LAYER3(card))
4795		return -EOPNOTSUPP;
4796
4797	ureq = (struct qeth_snmp_ureq __user *) udata;
4798	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4799	    get_user(req_len, &ureq->hdr.req_len))
4800		return -EFAULT;
4801
4802	/* Sanitize user input, to avoid overflows in iob size calculation: */
4803	if (req_len > QETH_BUFSIZE)
4804		return -EINVAL;
4805
4806	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4807	if (!iob)
4808		return -ENOMEM;
4809
4810	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4811			   &ureq->cmd, req_len)) {
4812		qeth_put_cmd(iob);
4813		return -EFAULT;
4814	}
4815
4816	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4817	if (!qinfo.udata) {
4818		qeth_put_cmd(iob);
4819		return -ENOMEM;
4820	}
4821	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4822
4823	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4824	if (rc)
4825		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4826				 CARD_DEVID(card), rc);
4827	else if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4828		rc = -EFAULT;
4831
4832	kfree(qinfo.udata);
4833	return rc;
4834}
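
/*
 * Layout note (an assumption read off the copy logic above): userspace
 * passes a single buffer that starts with the request header, and the
 * SNMP reply data is copied back into that same buffer behind the
 * header; this is why udata_offset starts at
 * sizeof(struct qeth_snmp_ureq_hdr).
 */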
4835
4836static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4837					 struct qeth_reply *reply,
4838					 unsigned long data)
4839{
4840	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4841	struct qeth_qoat_priv *priv = reply->param;
4842	int resdatalen;
4843
4844	QETH_CARD_TEXT(card, 3, "qoatcb");
4845	if (qeth_setadpparms_inspect_rc(cmd))
4846		return -EIO;
4847
4848	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4849
4850	if (resdatalen > (priv->buffer_len - priv->response_len))
4851		return -ENOSPC;
4852
4853	memcpy(priv->buffer + priv->response_len,
4854	       &cmd->data.setadapterparms.hdr, resdatalen);
4855	priv->response_len += resdatalen;
4856
4857	if (cmd->data.setadapterparms.hdr.seq_no <
4858	    cmd->data.setadapterparms.hdr.used_total)
4859		return 1;
4860	return 0;
4861}
4862
4863static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4864{
4865	int rc = 0;
4866	struct qeth_cmd_buffer *iob;
4867	struct qeth_ipa_cmd *cmd;
4868	struct qeth_query_oat *oat_req;
4869	struct qeth_query_oat_data oat_data;
4870	struct qeth_qoat_priv priv;
4871	void __user *tmp;
4872
4873	QETH_CARD_TEXT(card, 3, "qoatcmd");
4874
4875	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4876		return -EOPNOTSUPP;
4877
4878	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4879		return -EFAULT;
4880
4881	priv.buffer_len = oat_data.buffer_len;
4882	priv.response_len = 0;
4883	priv.buffer = vzalloc(oat_data.buffer_len);
4884	if (!priv.buffer)
4885		return -ENOMEM;
4886
4887	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4888				   SETADP_DATA_SIZEOF(query_oat));
4889	if (!iob) {
4890		rc = -ENOMEM;
4891		goto out_free;
4892	}
4893	cmd = __ipa_cmd(iob);
4894	oat_req = &cmd->data.setadapterparms.data.query_oat;
4895	oat_req->subcmd_code = oat_data.command;
4896
4897	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4898	if (!rc) {
4899		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4900					 u64_to_user_ptr(oat_data.ptr);
4901		oat_data.response_len = priv.response_len;
4902
4903		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4904		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4905			rc = -EFAULT;
4906	}
4907
4908out_free:
4909	vfree(priv.buffer);
4910	return rc;
4911}
4912
4913static int qeth_query_card_info_cb(struct qeth_card *card,
4914				   struct qeth_reply *reply, unsigned long data)
4915{
4916	struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4917	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4918	struct qeth_query_card_info *card_info;
4919
4920	QETH_CARD_TEXT(card, 2, "qcrdincb");
4921	if (qeth_setadpparms_inspect_rc(cmd))
4922		return -EIO;
4923
4924	card_info = &cmd->data.setadapterparms.data.card_info;
4925	carrier_info->card_type = card_info->card_type;
4926	carrier_info->port_mode = card_info->port_mode;
4927	carrier_info->port_speed = card_info->port_speed;
4928	return 0;
4929}
4930
4931int qeth_query_card_info(struct qeth_card *card,
4932			 struct carrier_info *carrier_info)
4933{
4934	struct qeth_cmd_buffer *iob;
4935
4936	QETH_CARD_TEXT(card, 2, "qcrdinfo");
4937	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4938		return -EOPNOTSUPP;
4939	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4940	if (!iob)
4941		return -ENOMEM;
4942	return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4943					(void *)carrier_info);
4944}
4945
4946/**
4947 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4948 * @card: pointer to a qeth_card
4949 *
4950 * Returns
4951 *	0, if a MAC address has been set for the card's netdevice
4952 *	a return code, for various error conditions
4953 */
4954int qeth_vm_request_mac(struct qeth_card *card)
4955{
4956	struct diag26c_mac_resp *response;
4957	struct diag26c_mac_req *request;
4958	struct ccw_dev_id id;
4959	int rc;
4960
4961	QETH_CARD_TEXT(card, 2, "vmreqmac");
4962
4963	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4964	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4965	if (!request || !response) {
4966		rc = -ENOMEM;
4967		goto out;
4968	}
4969
4970	ccw_device_get_id(CARD_DDEV(card), &id);
4971	request->resp_buf_len = sizeof(*response);
4972	request->resp_version = DIAG26C_VERSION2;
4973	request->op_code = DIAG26C_GET_MAC;
4974	request->devno = id.devno;
4975
4976	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4977	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4978	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4979	if (rc)
4980		goto out;
4981	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4982
4983	if (request->resp_buf_len < sizeof(*response) ||
4984	    response->version != request->resp_version) {
4985		rc = -EIO;
4986		QETH_CARD_TEXT(card, 2, "badresp");
4987		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4988			      sizeof(request->resp_buf_len));
4989	} else if (!is_valid_ether_addr(response->mac)) {
4990		rc = -EINVAL;
4991		QETH_CARD_TEXT(card, 2, "badmac");
4992		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4993	} else {
4994		ether_addr_copy(card->dev->dev_addr, response->mac);
4995	}
4996
4997out:
4998	kfree(response);
4999	kfree(request);
5000	return rc;
5001}
5002EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
5003
5004static void qeth_determine_capabilities(struct qeth_card *card)
5005{
5006	struct qeth_channel *channel = &card->data;
5007	struct ccw_device *ddev = channel->ccwdev;
5008	int rc;
5009	int ddev_offline = 0;
5010
5011	QETH_CARD_TEXT(card, 2, "detcapab");
5012	if (!ddev->online) {
5013		ddev_offline = 1;
5014		rc = qeth_start_channel(channel);
5015		if (rc) {
5016			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5017			goto out;
5018		}
5019	}
5020
5021	rc = qeth_read_conf_data(card);
5022	if (rc) {
5023		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
5024				 CARD_DEVID(card), rc);
5025		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5026		goto out_offline;
5027	}
5028
5029	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
5030	if (rc)
5031		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5032
5033	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5034	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5035	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5036	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5037	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
5038	if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
5039	    ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
5040	    ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
5041		dev_info(&card->gdev->dev,
5042			"Completion Queueing supported\n");
5043	} else {
5044		card->options.cq = QETH_CQ_NOTAVAILABLE;
5045	}
5046
5048out_offline:
5049	if (ddev_offline == 1)
5050		qeth_stop_channel(channel);
5051out:
5052	return;
5053}
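
/*
 * Readability note: by De Morgan, the double-negated check above is
 * equivalent to
 *
 *	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
 *	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
 *	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE))
 *
 * i.e. Completion Queueing requires an IQD-format queue plus both
 * adapter capability bits.
 */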
5054
5055static int qeth_qdio_establish(struct qeth_card *card)
5056{
5057	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5058	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5059	struct qdio_initialize init_data;
5060	char *qib_param_field;
5061	unsigned int i;
5062	int rc = 0;
5063
5064	QETH_CARD_TEXT(card, 2, "qdioest");
5065
5066	qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5067	if (!qib_param_field) {
5068		rc =  -ENOMEM;
5069		goto out_free_nothing;
5070	}
5071
5072	qeth_create_qib_param_field(card, qib_param_field);
5073	qeth_create_qib_param_field_blkt(card, qib_param_field);
5074
5075	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5076	if (card->options.cq == QETH_CQ_ENABLED)
5077		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5078
5079	for (i = 0; i < card->qdio.no_out_queues; i++)
5080		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5081
5082	memset(&init_data, 0, sizeof(struct qdio_initialize));
5083	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5084							  QDIO_QETH_QFMT;
5085	init_data.qib_param_field_format = 0;
5086	init_data.qib_param_field        = qib_param_field;
5087	init_data.no_input_qs            = card->qdio.no_in_queues;
5088	init_data.no_output_qs           = card->qdio.no_out_queues;
5089	init_data.input_handler		 = qeth_qdio_input_handler;
5090	init_data.output_handler	 = qeth_qdio_output_handler;
5091	init_data.irq_poll		 = qeth_qdio_poll;
5092	init_data.int_parm               = (unsigned long) card;
5093	init_data.input_sbal_addr_array  = in_sbal_ptrs;
5094	init_data.output_sbal_addr_array = out_sbal_ptrs;
5095	init_data.output_sbal_state_array = card->qdio.out_bufstates;
5096	init_data.scan_threshold	 = IS_IQD(card) ? 0 : 32;
5097
5098	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5099		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5100		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5101				   init_data.no_output_qs);
5102		if (rc) {
5103			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5104			goto out;
5105		}
5106		rc = qdio_establish(CARD_DDEV(card), &init_data);
5107		if (rc) {
5108			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5109			qdio_free(CARD_DDEV(card));
5110		}
5111	}
5112
5113	switch (card->options.cq) {
5114	case QETH_CQ_ENABLED:
5115		dev_info(&card->gdev->dev, "Completion Queue support enabled");
5116		break;
5117	case QETH_CQ_DISABLED:
5118		dev_info(&card->gdev->dev, "Completion Queue support disabled");
5119		break;
5120	default:
5121		break;
5122	}
5123out:
5124	kfree(qib_param_field);
5125out_free_nothing:
5126	return rc;
5127}
5128
5129static void qeth_core_free_card(struct qeth_card *card)
5130{
5131	QETH_CARD_TEXT(card, 2, "freecrd");
5132
5133	unregister_service_level(&card->qeth_service_level);
5134	debugfs_remove_recursive(card->debugfs);
5135	qeth_put_cmd(card->read_cmd);
5136	destroy_workqueue(card->event_wq);
5137	dev_set_drvdata(&card->gdev->dev, NULL);
5138	kfree(card);
5139}
5140
5141void qeth_trace_features(struct qeth_card *card)
5142{
5143	QETH_CARD_TEXT(card, 2, "features");
5144	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5145	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5146	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5147	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5148		      sizeof(card->info.diagass_support));
5149}
5150EXPORT_SYMBOL_GPL(qeth_trace_features);
5151
5152static struct ccw_device_id qeth_ids[] = {
5153	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5154					.driver_info = QETH_CARD_TYPE_OSD},
5155	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5156					.driver_info = QETH_CARD_TYPE_IQD},
5157#ifdef CONFIG_QETH_OSN
5158	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
5159					.driver_info = QETH_CARD_TYPE_OSN},
5160#endif
5161	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5162					.driver_info = QETH_CARD_TYPE_OSM},
5163#ifdef CONFIG_QETH_OSX
5164	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5165					.driver_info = QETH_CARD_TYPE_OSX},
5166#endif
5167	{},
5168};
5169MODULE_DEVICE_TABLE(ccw, qeth_ids);
5170
5171static struct ccw_driver qeth_ccw_driver = {
5172	.driver = {
5173		.owner = THIS_MODULE,
5174		.name = "qeth",
5175	},
5176	.ids = qeth_ids,
5177	.probe = ccwgroup_probe_ccwdev,
5178	.remove = ccwgroup_remove_ccwdev,
5179};
5180
5181int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5182{
5183	int retries = 3;
5184	int rc;
5185
5186	QETH_CARD_TEXT(card, 2, "hrdsetup");
5187	atomic_set(&card->force_alloc_skb, 0);
5188	rc = qeth_update_from_chp_desc(card);
5189	if (rc)
5190		return rc;
5191retry:
5192	if (retries < 3)
5193		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5194				 CARD_DEVID(card));
5195	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5196	qeth_stop_channel(&card->data);
5197	qeth_stop_channel(&card->write);
5198	qeth_stop_channel(&card->read);
5199	qdio_free(CARD_DDEV(card));
5200
5201	rc = qeth_start_channel(&card->read);
5202	if (rc)
5203		goto retriable;
5204	rc = qeth_start_channel(&card->write);
5205	if (rc)
5206		goto retriable;
5207	rc = qeth_start_channel(&card->data);
5208	if (rc)
5209		goto retriable;
5210retriable:
5211	if (rc == -ERESTARTSYS) {
5212		QETH_CARD_TEXT(card, 2, "break1");
5213		return rc;
5214	} else if (rc) {
5215		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5216		if (--retries < 0)
5217			goto out;
5218		else
5219			goto retry;
5220	}
5221
5222	qeth_determine_capabilities(card);
5223	qeth_idx_init(card);
5224
5225	rc = qeth_idx_activate_read_channel(card);
5226	if (rc == -EINTR) {
5227		QETH_CARD_TEXT(card, 2, "break2");
5228		return rc;
5229	} else if (rc) {
5230		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5231		if (--retries < 0)
5232			goto out;
5233		else
5234			goto retry;
5235	}
5236
5237	rc = qeth_idx_activate_write_channel(card);
5238	if (rc == -EINTR) {
5239		QETH_CARD_TEXT(card, 2, "break3");
5240		return rc;
5241	} else if (rc) {
5242		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5243		if (--retries < 0)
5244			goto out;
5245		else
5246			goto retry;
5247	}
5248	card->read_or_write_problem = 0;
5249	rc = qeth_mpc_initialize(card);
5250	if (rc) {
5251		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5252		goto out;
5253	}
5254
5255	rc = qeth_send_startlan(card);
5256	if (rc) {
5257		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5258		if (rc == -ENETDOWN) {
5259			dev_warn(&card->gdev->dev, "The LAN is offline\n");
5260			*carrier_ok = false;
5261		} else {
5262			goto out;
5263		}
5264	} else {
5265		*carrier_ok = true;
5266	}
5267
5268	card->options.ipa4.supported = 0;
5269	card->options.ipa6.supported = 0;
5270	card->options.adp.supported = 0;
5271	card->options.sbp.supported_funcs = 0;
5272	card->info.diagass_support = 0;
5273	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5274	if (rc == -ENOMEM)
5275		goto out;
5276	if (qeth_is_supported(card, IPA_IPV6)) {
5277		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5278		if (rc == -ENOMEM)
5279			goto out;
5280	}
5281	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5282		rc = qeth_query_setadapterparms(card);
5283		if (rc < 0) {
5284			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5285			goto out;
5286		}
5287	}
5288	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5289		rc = qeth_query_setdiagass(card);
5290		if (rc)
5291			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5292	}
5293
5294	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5295	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5296		card->info.hwtrap = 0;
5297
5298	if (card->options.isolation != ISOLATION_MODE_NONE) {
5299		rc = qeth_setadpparms_set_access_ctrl(card,
5300						      card->options.isolation);
5301		if (rc)
5302			goto out;
5303	}
5304
5305	rc = qeth_init_qdio_queues(card);
5306	if (rc) {
5307		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5308		goto out;
5309	}
5310
5311	return 0;
5312out:
5313	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5314		"an error on the device\n");
5315	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5316			 CARD_DEVID(card), rc);
5317	return rc;
5318}
5319EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5320
5321static int qeth_set_online(struct qeth_card *card)
5322{
5323	int rc;
5324
5325	mutex_lock(&card->discipline_mutex);
5326	mutex_lock(&card->conf_mutex);
5327	QETH_CARD_TEXT(card, 2, "setonlin");
5328
5329	rc = card->discipline->set_online(card);
5330
5331	mutex_unlock(&card->conf_mutex);
5332	mutex_unlock(&card->discipline_mutex);
5333
5334	return rc;
5335}
5336
5337int qeth_set_offline(struct qeth_card *card, bool resetting)
5338{
5339	int rc, rc2, rc3;
5340
5341	mutex_lock(&card->discipline_mutex);
5342	mutex_lock(&card->conf_mutex);
5343	QETH_CARD_TEXT(card, 3, "setoffl");
5344
5345	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5346		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5347		card->info.hwtrap = 1;
5348	}
5349
5350	rtnl_lock();
5351	card->info.open_when_online = card->dev->flags & IFF_UP;
5352	dev_close(card->dev);
5353	netif_device_detach(card->dev);
5354	netif_carrier_off(card->dev);
5355	rtnl_unlock();
5356
5357	card->discipline->set_offline(card);
5358
5359	rc  = qeth_stop_channel(&card->data);
5360	rc2 = qeth_stop_channel(&card->write);
5361	rc3 = qeth_stop_channel(&card->read);
5362	if (!rc)
5363		rc = (rc2) ? rc2 : rc3;
5364	if (rc)
5365		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5366	qdio_free(CARD_DDEV(card));
5367
5368	/* let userspace know that the device is offline */
5369	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5370
5371	mutex_unlock(&card->conf_mutex);
5372	mutex_unlock(&card->discipline_mutex);
5373	return 0;
5374}
5375EXPORT_SYMBOL_GPL(qeth_set_offline);
5376
5377static int qeth_do_reset(void *data)
5378{
5379	struct qeth_card *card = data;
5380	int rc;
5381
5382	QETH_CARD_TEXT(card, 2, "recover1");
5383	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5384		return 0;
5385	QETH_CARD_TEXT(card, 2, "recover2");
5386	dev_warn(&card->gdev->dev,
5387		 "A recovery process has been started for the device\n");
5388
5389	qeth_set_offline(card, true);
5390	rc = qeth_set_online(card);
5391	if (!rc) {
5392		dev_info(&card->gdev->dev,
5393			 "Device successfully recovered!\n");
5394	} else {
5395		ccwgroup_set_offline(card->gdev);
5396		dev_warn(&card->gdev->dev,
5397			 "The qeth device driver failed to recover an error on the device\n");
5398	}
5399	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5400	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5401	return 0;
5402}
5403
5404#if IS_ENABLED(CONFIG_QETH_L3)
5405static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5406				struct qeth_hdr *hdr)
5407{
5408	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5409	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5410	struct net_device *dev = skb->dev;
5411
5412	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5413		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5414				"FAKELL", skb->len);
5415		return;
5416	}
5417
5418	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5419		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5420							     ETH_P_IP;
5421		unsigned char tg_addr[ETH_ALEN];
5422
5423		skb_reset_network_header(skb);
5424		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5425		case QETH_CAST_MULTICAST:
5426			if (prot == ETH_P_IP)
5427				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5428			else
5429				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5430			QETH_CARD_STAT_INC(card, rx_multicast);
5431			break;
5432		case QETH_CAST_BROADCAST:
5433			ether_addr_copy(tg_addr, dev->broadcast);
5434			QETH_CARD_STAT_INC(card, rx_multicast);
5435			break;
5436		default:
5437			if (card->options.sniffer)
5438				skb->pkt_type = PACKET_OTHERHOST;
5439			ether_addr_copy(tg_addr, dev->dev_addr);
5440		}
5441
5442		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5443			dev_hard_header(skb, dev, prot, tg_addr,
5444					&l3_hdr->next_hop.rx.src_mac, skb->len);
5445		else
5446			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5447					skb->len);
5448	}
5449
5450	/* copy VLAN tag from hdr into skb */
5451	if (!card->options.sniffer &&
5452	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5453				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5454		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5455				l3_hdr->vlan_id :
5456				l3_hdr->next_hop.rx.vlan_id;
5457
5458		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5459	}
5460}
5461#endif
5462
5463static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5464			     struct qeth_hdr *hdr, bool uses_frags)
5465{
5466	struct napi_struct *napi = &card->napi;
5467	bool is_cso;
5468
5469	switch (hdr->hdr.l2.id) {
5470	case QETH_HEADER_TYPE_OSN:
5471		skb_push(skb, sizeof(*hdr));
5472		skb_copy_to_linear_data(skb, hdr, sizeof(*hdr));
5473		QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5474		QETH_CARD_STAT_INC(card, rx_packets);
5475
5476		card->osn_info.data_cb(skb);
5477		return;
5478#if IS_ENABLED(CONFIG_QETH_L3)
5479	case QETH_HEADER_TYPE_LAYER3:
5480		qeth_l3_rebuild_skb(card, skb, hdr);
5481		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5482		break;
5483#endif
5484	case QETH_HEADER_TYPE_LAYER2:
5485		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5486		break;
5487	default:
5488		/* never happens */
5489		if (uses_frags)
5490			napi_free_frags(napi);
5491		else
5492			dev_kfree_skb_any(skb);
5493		return;
5494	}
5495
5496	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5497		skb->ip_summed = CHECKSUM_UNNECESSARY;
5498		QETH_CARD_STAT_INC(card, rx_skb_csum);
5499	} else {
5500		skb->ip_summed = CHECKSUM_NONE;
5501	}
5502
5503	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5504	QETH_CARD_STAT_INC(card, rx_packets);
5505	if (skb_is_nonlinear(skb)) {
5506		QETH_CARD_STAT_INC(card, rx_sg_skbs);
5507		QETH_CARD_STAT_ADD(card, rx_sg_frags,
5508				   skb_shinfo(skb)->nr_frags);
5509	}
5510
5511	if (uses_frags) {
5512		napi_gro_frags(napi);
5513	} else {
5514		skb->protocol = eth_type_trans(skb, skb->dev);
5515		napi_gro_receive(napi, skb);
5516	}
5517}
5518
5519static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5520{
5521	struct page *page = virt_to_page(data);
5522	unsigned int next_frag;
5523
5524	next_frag = skb_shinfo(skb)->nr_frags;
5525	get_page(page);
5526	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5527			data_len);
5528}
5529
5530static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5531{
5532	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5533}
5534
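/* Extract a single packet from the current inbound QDIO buffer: parse
 * its qeth header, copy up to linear_len bytes into the skb's linear
 * part, attach the remaining data as page fragments, and advance
 * across buffer elements as needed. Undersized or unsupported packets
 * are dropped and skipped over.
 */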
5535static int qeth_extract_skb(struct qeth_card *card,
5536			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5537			    int *__offset)
5538{
5539	struct qeth_priv *priv = netdev_priv(card->dev);
5540	struct qdio_buffer *buffer = qethbuffer->buffer;
5541	struct napi_struct *napi = &card->napi;
5542	struct qdio_buffer_element *element;
5543	unsigned int linear_len = 0;
5544	bool uses_frags = false;
5545	int offset = *__offset;
5546	bool use_rx_sg = false;
5547	unsigned int headroom;
5548	struct qeth_hdr *hdr;
5549	struct sk_buff *skb;
5550	int skb_len = 0;
5551
5552	element = &buffer->element[*element_no];
5553
5554next_packet:
5555	/* qeth_hdr must not cross element boundaries */
5556	while (element->length < offset + sizeof(struct qeth_hdr)) {
5557		if (qeth_is_last_sbale(element))
5558			return -ENODATA;
5559		element++;
5560		offset = 0;
5561	}
5562
5563	hdr = phys_to_virt(element->addr) + offset;
5564	offset += sizeof(*hdr);
5565	skb = NULL;
5566
5567	switch (hdr->hdr.l2.id) {
5568	case QETH_HEADER_TYPE_LAYER2:
5569		skb_len = hdr->hdr.l2.pkt_length;
5570		linear_len = ETH_HLEN;
5571		headroom = 0;
5572		break;
5573	case QETH_HEADER_TYPE_LAYER3:
5574		skb_len = hdr->hdr.l3.length;
5575		if (!IS_LAYER3(card)) {
5576			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5577			goto walk_packet;
5578		}
5579
5580		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5581			linear_len = ETH_HLEN;
5582			headroom = 0;
5583			break;
5584		}
5585
5586		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5587			linear_len = sizeof(struct ipv6hdr);
5588		else
5589			linear_len = sizeof(struct iphdr);
5590		headroom = ETH_HLEN;
5591		break;
5592	case QETH_HEADER_TYPE_OSN:
5593		skb_len = hdr->hdr.osn.pdu_length;
5594		if (!IS_OSN(card)) {
5595			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5596			goto walk_packet;
5597		}
5598
5599		linear_len = skb_len;
5600		headroom = sizeof(struct qeth_hdr);
5601		break;
5602	default:
5603		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5604			QETH_CARD_STAT_INC(card, rx_frame_errors);
5605		else
5606			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5607
5608		/* Can't determine packet length, drop the whole buffer. */
5609		return -EPROTONOSUPPORT;
5610	}
5611
5612	if (skb_len < linear_len) {
5613		QETH_CARD_STAT_INC(card, rx_dropped_runt);
5614		goto walk_packet;
5615	}
5616
5617	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5618		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
5619		     !atomic_read(&card->force_alloc_skb) &&
5620		     !IS_OSN(card));
5621
5622	if (use_rx_sg) {
5623		/* QETH_CQ_ENABLED only: */
5624		if (qethbuffer->rx_skb &&
5625		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5626			skb = qethbuffer->rx_skb;
5627			qethbuffer->rx_skb = NULL;
5628			goto use_skb;
5629		}
5630
5631		skb = napi_get_frags(napi);
5632		if (!skb) {
5633			/* -ENOMEM, no point in falling back further. */
5634			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5635			goto walk_packet;
5636		}
5637
5638		if (skb_tailroom(skb) >= linear_len + headroom) {
5639			uses_frags = true;
5640			goto use_skb;
5641		}
5642
5643		netdev_info_once(card->dev,
5644				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5645				 linear_len + headroom, skb_tailroom(skb));
5646		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
5647	}
5648
5649	linear_len = skb_len;
5650	skb = napi_alloc_skb(napi, linear_len + headroom);
5651	if (!skb) {
5652		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5653		goto walk_packet;
5654	}
5655
5656use_skb:
5657	if (headroom)
5658		skb_reserve(skb, headroom);
5659walk_packet:
5660	while (skb_len) {
5661		int data_len = min(skb_len, (int)(element->length - offset));
5662		char *data = phys_to_virt(element->addr) + offset;
5663
5664		skb_len -= data_len;
5665		offset += data_len;
5666
5667		/* Extract data from current element: */
5668		if (skb && data_len) {
5669			if (linear_len) {
5670				unsigned int copy_len;
5671
5672				copy_len = min_t(unsigned int, linear_len,
5673						 data_len);
5674
5675				skb_put_data(skb, data, copy_len);
5676				linear_len -= copy_len;
5677				data_len -= copy_len;
5678				data += copy_len;
5679			}
5680
5681			if (data_len)
5682				qeth_create_skb_frag(skb, data, data_len);
5683		}
5684
5685		/* Step forward to next element: */
5686		if (skb_len) {
5687			if (qeth_is_last_sbale(element)) {
5688				QETH_CARD_TEXT(card, 4, "unexeob");
5689				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5690				if (skb) {
5691					if (uses_frags)
5692						napi_free_frags(napi);
5693					else
5694						dev_kfree_skb_any(skb);
5695					QETH_CARD_STAT_INC(card,
5696							   rx_length_errors);
5697				}
5698				return -EMSGSIZE;
5699			}
5700			element++;
5701			offset = 0;
5702		}
5703	}
5704
5705	/* This packet was skipped, go get another one: */
5706	if (!skb)
5707		goto next_packet;
5708
5709	*element_no = element - &buffer->element[0];
5710	*__offset = offset;
5711
5712	qeth_receive_skb(card, skb, hdr, uses_frags);
5713	return 0;
5714}
5715
5716static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5717				      struct qeth_qdio_buffer *buf, bool *done)
5718{
5719	unsigned int work_done = 0;
5720
5721	while (budget) {
5722		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5723				     &card->rx.e_offset)) {
5724			*done = true;
5725			break;
5726		}
5727
5728		work_done++;
5729		budget--;
5730	}
5731
5732	return work_done;
5733}
5734
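/* RX part of the NAPI poll: fetch completed inbound buffers from qdio,
 * extract skbs until the budget is exhausted, and recycle fully
 * processed buffers back into the buffer pool / refill backlog.
 */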
5735static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5736{
5737	struct qeth_rx *ctx = &card->rx;
5738	unsigned int work_done = 0;
5739
5740	while (budget > 0) {
5741		struct qeth_qdio_buffer *buffer;
5742		unsigned int skbs_done = 0;
5743		bool done = false;
5744
5745		/* Fetch completed RX buffers: */
5746		if (!card->rx.b_count) {
5747			card->rx.qdio_err = 0;
5748			card->rx.b_count = qdio_get_next_buffers(
5749				card->data.ccwdev, 0, &card->rx.b_index,
5750				&card->rx.qdio_err);
5751			if (card->rx.b_count <= 0) {
5752				card->rx.b_count = 0;
5753				break;
5754			}
5755		}
5756
5757		/* Process one completed RX buffer: */
5758		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5759		if (!(card->rx.qdio_err &&
5760		      qeth_check_qdio_errors(card, buffer->buffer,
5761					     card->rx.qdio_err, "qinerr")))
5762			skbs_done = qeth_extract_skbs(card, budget, buffer,
5763						      &done);
5764		else
5765			done = true;
5766
5767		work_done += skbs_done;
5768		budget -= skbs_done;
5769
5770		if (done) {
5771			QETH_CARD_STAT_INC(card, rx_bufs);
5772			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5773			buffer->pool_entry = NULL;
5774			card->rx.b_count--;
5775			ctx->bufs_refill++;
5776			ctx->bufs_refill -= qeth_rx_refill_queue(card,
5777								 ctx->bufs_refill);
5778
5779			/* Step forward to next buffer: */
5780			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5781			card->rx.buf_element = 0;
5782			card->rx.e_offset = 0;
5783		}
5784	}
5785
5786	return work_done;
5787}
5788
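/* Drain the TX completion queue (input queue 1) and feed its events to
 * qeth_qdio_cq_handler().
 */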
5789static void qeth_cq_poll(struct qeth_card *card)
5790{
5791	unsigned int work_done = 0;
5792
5793	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5794		unsigned int start, error;
5795		int completed;
5796
5797		completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start,
5798					       &error);
5799		if (completed <= 0)
5800			return;
5801
5802		qeth_qdio_cq_handler(card, error, 1, start, completed);
5803		work_done += completed;
5804	}
5805}
5806
5807int qeth_poll(struct napi_struct *napi, int budget)
5808{
5809	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5810	unsigned int work_done;
5811
5812	work_done = qeth_rx_poll(card, budget);
5813
5814	if (card->options.cq == QETH_CQ_ENABLED)
5815		qeth_cq_poll(card);
5816
5817	if (budget) {
5818		struct qeth_rx *ctx = &card->rx;
5819
5820		/* Process any substantial refill backlog: */
5821		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5822
5823		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5824		if (work_done >= budget)
5825			return work_done;
5826	}
5827
5828	if (napi_complete_done(napi, work_done) &&
5829	    qdio_start_irq(CARD_DDEV(card)))
5830		napi_schedule(napi);
5831
5832	return work_done;
5833}
5834EXPORT_SYMBOL_GPL(qeth_poll);
5835
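/* Complete one TX buffer of an IQD device. A buffer that the HW
 * reports as pending keeps its skbs until the CQ delivers the final
 * state; its queue slot is scrubbed and re-armed with a fresh buffer
 * so that it can be reused immediately.
 */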
5836static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5837				 unsigned int bidx, bool error, int budget)
5838{
5839	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5840	u8 sflags = buffer->buffer->element[15].sflags;
5841	struct qeth_card *card = queue->card;
5842
5843	if (queue->bufstates && (queue->bufstates[bidx].flags &
5844				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
5845		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
5846
5847		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
5848						   QETH_QDIO_BUF_PENDING) ==
5849		    QETH_QDIO_BUF_PRIMED)
5850			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5851
5852		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5853
5854		/* prepare the queue slot for re-use: */
5855		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5856		if (qeth_init_qdio_out_buf(queue, bidx)) {
5857			QETH_CARD_TEXT(card, 2, "outofbuf");
5858			qeth_schedule_recovery(card);
5859		}
5860
5861		return;
5862	}
5863
5864	if (card->options.cq == QETH_CQ_ENABLED)
5865		qeth_notify_skbs(queue, buffer,
5866				 qeth_compute_cq_notification(sflags, 0));
5867	qeth_clear_output_buffer(queue, buffer, error, budget);
5868}
5869
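/* Per-queue TX NAPI poll for IQD devices: reap completed buffers from
 * qdio, report them to BQL, wake the txq if it was stopped, and re-arm
 * the completion timer when no completed work is found.
 */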
5870static int qeth_tx_poll(struct napi_struct *napi, int budget)
5871{
5872	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5873	unsigned int queue_no = queue->queue_no;
5874	struct qeth_card *card = queue->card;
5875	struct net_device *dev = card->dev;
5876	unsigned int work_done = 0;
5877	struct netdev_queue *txq;
5878
5879	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5880
5881	while (1) {
5882		unsigned int start, error, i;
5883		unsigned int packets = 0;
5884		unsigned int bytes = 0;
5885		int completed;
5886
5887		if (qeth_out_queue_is_empty(queue)) {
5888			napi_complete(napi);
5889			return 0;
5890		}
5891
5892		/* Give the CPU a breather: */
5893		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5894			QETH_TXQ_STAT_INC(queue, completion_yield);
5895			if (napi_complete_done(napi, 0))
5896				napi_schedule(napi);
5897			return 0;
5898		}
5899
5900		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
5901					       &start, &error);
5902		if (completed <= 0) {
5903			/* Ensure we see TX completion for pending work: */
5904			if (napi_complete_done(napi, 0))
5905				qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS);
5906			return 0;
5907		}
5908
5909		for (i = start; i < start + completed; i++) {
5910			struct qeth_qdio_out_buffer *buffer;
5911			unsigned int bidx = QDIO_BUFNR(i);
5912
5913			buffer = queue->bufs[bidx];
5914			packets += buffer->frames;
5915			bytes += buffer->bytes;
5916
5917			qeth_handle_send_error(card, buffer, error);
5918			qeth_iqd_tx_complete(queue, bidx, error, budget);
5919			qeth_cleanup_handled_pending(queue, bidx, false);
5920		}
5921
5922		netdev_tx_completed_queue(txq, packets, bytes);
5923		atomic_sub(completed, &queue->used_buffers);
5924		work_done += completed;
5925
5926	/* xmit may have observed the full-condition, but not yet
5927	 * stopped the txq. In that case the code below won't trigger.
5928	 * So before returning, xmit will re-check the txq's fill level
5929	 * and wake it up if needed.
5930	 */
5931		if (netif_tx_queue_stopped(txq) &&
5932		    !qeth_out_queue_is_full(queue))
5933			netif_tx_wake_queue(txq);
5934	}
5935}
5936
5937static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5938{
5939	if (!cmd->hdr.return_code)
5940		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5941	return cmd->hdr.return_code;
5942}
5943
5944static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5945					struct qeth_reply *reply,
5946					unsigned long data)
5947{
5948	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5949	struct qeth_ipa_caps *caps = reply->param;
5950
5951	if (qeth_setassparms_inspect_rc(cmd))
5952		return -EIO;
5953
5954	caps->supported = cmd->data.setassparms.data.caps.supported;
5955	caps->enabled = cmd->data.setassparms.data.caps.enabled;
5956	return 0;
5957}
5958
5959int qeth_setassparms_cb(struct qeth_card *card,
5960			struct qeth_reply *reply, unsigned long data)
5961{
5962	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5963
5964	QETH_CARD_TEXT(card, 4, "defadpcb");
5965
5966	if (cmd->hdr.return_code)
5967		return -EIO;
5968
5969	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5970	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5971		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
5972	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5973		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
5974	return 0;
5975}
5976EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5977
5978struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
5979						 enum qeth_ipa_funcs ipa_func,
5980						 u16 cmd_code,
5981						 unsigned int data_length,
5982						 enum qeth_prot_versions prot)
5983{
5984	struct qeth_ipacmd_setassparms *setassparms;
5985	struct qeth_ipacmd_setassparms_hdr *hdr;
5986	struct qeth_cmd_buffer *iob;
5987
5988	QETH_CARD_TEXT(card, 4, "getasscm");
5989	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
5990				 data_length +
5991				 offsetof(struct qeth_ipacmd_setassparms,
5992					  data));
5993	if (!iob)
5994		return NULL;
5995
5996	setassparms = &__ipa_cmd(iob)->data.setassparms;
5997	setassparms->assist_no = ipa_func;
5998
5999	hdr = &setassparms->hdr;
6000	hdr->length = sizeof(*hdr) + data_length;
6001	hdr->command_code = cmd_code;
6002	return iob;
6003}
6004EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6005
6006int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6007				      enum qeth_ipa_funcs ipa_func,
6008				      u16 cmd_code, u32 *data,
6009				      enum qeth_prot_versions prot)
6010{
6011	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6012	struct qeth_cmd_buffer *iob;
6013
6014	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6015	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6016	if (!iob)
6017		return -ENOMEM;
6018
6019	if (data)
6020		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6021	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6022}
6023EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
6024
6025static void qeth_unregister_dbf_views(void)
6026{
6027	int x;
6028	for (x = 0; x < QETH_DBF_INFOS; x++) {
6029		debug_unregister(qeth_dbf[x].id);
6030		qeth_dbf[x].id = NULL;
6031	}
6032}
6033
6034void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6035{
6036	char dbf_txt_buf[32];
6037	va_list args;
6038
6039	if (!debug_level_enabled(id, level))
6040		return;
6041	va_start(args, fmt);
6042	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6043	va_end(args);
6044	debug_text_event(id, level, dbf_txt_buf);
6045}
6046EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
6047
6048static int qeth_register_dbf_views(void)
6049{
6050	int ret;
6051	int x;
6052
6053	for (x = 0; x < QETH_DBF_INFOS; x++) {
6054		/* register the areas */
6055		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6056						qeth_dbf[x].pages,
6057						qeth_dbf[x].areas,
6058						qeth_dbf[x].len);
6059		if (qeth_dbf[x].id == NULL) {
6060			qeth_unregister_dbf_views();
6061			return -ENOMEM;
6062		}
6063
6064		/* register a view */
6065		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6066		if (ret) {
6067			qeth_unregister_dbf_views();
6068			return ret;
6069		}
6070
6071		/* set a passing level */
6072		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6073	}
6074
6075	return 0;
6076}
6077
6078static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */
6079
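/* Bind the card to its discipline (Layer 2 or Layer 3), loading the
 * qeth_l2 / qeth_l3 module on demand.
 */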
6080int qeth_core_load_discipline(struct qeth_card *card,
6081		enum qeth_discipline_id discipline)
6082{
6083	mutex_lock(&qeth_mod_mutex);
6084	switch (discipline) {
6085	case QETH_DISCIPLINE_LAYER3:
6086		card->discipline = try_then_request_module(
6087			symbol_get(qeth_l3_discipline), "qeth_l3");
6088		break;
6089	case QETH_DISCIPLINE_LAYER2:
6090		card->discipline = try_then_request_module(
6091			symbol_get(qeth_l2_discipline), "qeth_l2");
6092		break;
6093	default:
6094		break;
6095	}
6096	mutex_unlock(&qeth_mod_mutex);
6097
6098	if (!card->discipline) {
6099		dev_err(&card->gdev->dev, "There is no kernel module to "
6100			"support discipline %d\n", discipline);
6101		return -EINVAL;
6102	}
6103
6104	card->options.layer = discipline;
6105	return 0;
6106}
6107
6108void qeth_core_free_discipline(struct qeth_card *card)
6109{
6110	if (IS_LAYER2(card))
6111		symbol_put(qeth_l2_discipline);
6112	else
6113		symbol_put(qeth_l3_discipline);
6114	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6115	card->discipline = NULL;
6116}
6117
6118const struct device_type qeth_generic_devtype = {
6119	.name = "qeth_generic",
6120	.groups = qeth_generic_attr_groups,
6121};
6122EXPORT_SYMBOL_GPL(qeth_generic_devtype);
6123
6124static const struct device_type qeth_osn_devtype = {
6125	.name = "qeth_osn",
6126	.groups = qeth_osn_attr_groups,
6127};
6128
6129#define DBF_NAME_LEN	20
6130
6131struct qeth_dbf_entry {
6132	char dbf_name[DBF_NAME_LEN];
6133	debug_info_t *dbf_info;
6134	struct list_head dbf_list;
6135};
6136
6137static LIST_HEAD(qeth_dbf_list);
6138static DEFINE_MUTEX(qeth_dbf_list_mutex);
6139
6140static debug_info_t *qeth_get_dbf_entry(char *name)
6141{
6142	struct qeth_dbf_entry *entry;
6143	debug_info_t *rc = NULL;
6144
6145	mutex_lock(&qeth_dbf_list_mutex);
6146	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6147		if (strcmp(entry->dbf_name, name) == 0) {
6148			rc = entry->dbf_info;
6149			break;
6150		}
6151	}
6152	mutex_unlock(&qeth_dbf_list_mutex);
6153	return rc;
6154}
6155
6156static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6157{
6158	struct qeth_dbf_entry *new_entry;
6159
6160	card->debug = debug_register(name, 2, 1, 8);
6161	if (!card->debug) {
6162		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6163		goto err;
6164	}
6165	if (debug_register_view(card->debug, &debug_hex_ascii_view))
6166		goto err_dbg;
6167	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6168	if (!new_entry)
6169		goto err_dbg;
6170	strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
6171	new_entry->dbf_info = card->debug;
6172	mutex_lock(&qeth_dbf_list_mutex);
6173	list_add(&new_entry->dbf_list, &qeth_dbf_list);
6174	mutex_unlock(&qeth_dbf_list_mutex);
6175
6176	return 0;
6177
6178err_dbg:
6179	debug_unregister(card->debug);
6180err:
6181	return -ENOMEM;
6182}
6183
6184static void qeth_clear_dbf_list(void)
6185{
6186	struct qeth_dbf_entry *entry, *tmp;
6187
6188	mutex_lock(&qeth_dbf_list_mutex);
6189	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6190		list_del(&entry->dbf_list);
6191		debug_unregister(entry->dbf_info);
6192		kfree(entry);
6193	}
6194	mutex_unlock(&qeth_dbf_list_mutex);
6195}
6196
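/* Allocate a net_device that matches the card type: a multi-queue
 * "hsi%d" device for HiperSockets (IQD), an "osn%d" device for OSN,
 * and a plain Ethernet device otherwise.
 */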
6197static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6198{
6199	struct net_device *dev;
6200	struct qeth_priv *priv;
6201
6202	switch (card->info.type) {
6203	case QETH_CARD_TYPE_IQD:
6204		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6205				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6206		break;
6207	case QETH_CARD_TYPE_OSM:
6208		dev = alloc_etherdev(sizeof(*priv));
6209		break;
6210	case QETH_CARD_TYPE_OSN:
6211		dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN,
6212				   ether_setup);
6213		break;
6214	default:
6215		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6216	}
6217
6218	if (!dev)
6219		return NULL;
6220
6221	priv = netdev_priv(dev);
6222	priv->rx_copybreak = QETH_RX_COPYBREAK;
6223
6224	dev->ml_priv = card;
6225	dev->watchdog_timeo = QETH_TX_TIMEOUT;
6226	dev->min_mtu = IS_OSN(card) ? 64 : 576;
6227	/* initialized when device first goes online: */
6228	dev->max_mtu = 0;
6229	dev->mtu = 0;
6230	SET_NETDEV_DEV(dev, &card->gdev->dev);
6231	netif_carrier_off(dev);
6232
6233	dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
6234					  &qeth_ethtool_ops;
6235
6236	return dev;
6237}
6238
6239struct net_device *qeth_clone_netdev(struct net_device *orig)
6240{
6241	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6242
6243	if (!clone)
6244		return NULL;
6245
6246	clone->dev_port = orig->dev_port;
6247	return clone;
6248}
6249
6250int qeth_setup_netdev(struct qeth_card *card)
6251{
6252	struct net_device *dev = card->dev;
6253	unsigned int num_tx_queues;
6254
6255	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6256	dev->hw_features |= NETIF_F_SG;
6257	dev->vlan_features |= NETIF_F_SG;
6258
6259	if (IS_IQD(card)) {
6260		dev->features |= NETIF_F_SG;
6261		num_tx_queues = QETH_IQD_MIN_TXQ;
6262	} else if (IS_VM_NIC(card)) {
6263		num_tx_queues = 1;
6264	} else {
6265		num_tx_queues = dev->real_num_tx_queues;
6266	}
6267
6268	return qeth_set_real_num_tx_queues(card, num_tx_queues);
6269}
6270EXPORT_SYMBOL_GPL(qeth_setup_netdev);
6271
6272static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6273{
6274	struct qeth_card *card;
6275	struct device *dev;
6276	int rc;
6277	enum qeth_discipline_id enforced_disc;
6278	char dbf_name[DBF_NAME_LEN];
6279
6280	QETH_DBF_TEXT(SETUP, 2, "probedev");
6281
6282	dev = &gdev->dev;
6283	if (!get_device(dev))
6284		return -ENODEV;
6285
6286	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6287
6288	card = qeth_alloc_card(gdev);
6289	if (!card) {
6290		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6291		rc = -ENOMEM;
6292		goto err_dev;
6293	}
6294
6295	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6296		dev_name(&gdev->dev));
6297	card->debug = qeth_get_dbf_entry(dbf_name);
6298	if (!card->debug) {
6299		rc = qeth_add_dbf_entry(card, dbf_name);
6300		if (rc)
6301			goto err_card;
6302	}
6303
6304	qeth_setup_card(card);
6305	card->dev = qeth_alloc_netdev(card);
6306	if (!card->dev) {
6307		rc = -ENOMEM;
6308		goto err_card;
6309	}
6310
6311	qeth_determine_capabilities(card);
6312	qeth_set_blkt_defaults(card);
6313
6314	card->qdio.no_out_queues = card->dev->num_tx_queues;
6315	rc = qeth_update_from_chp_desc(card);
6316	if (rc)
6317		goto err_chp_desc;
6318
6319	enforced_disc = qeth_enforce_discipline(card);
6320	switch (enforced_disc) {
6321	case QETH_DISCIPLINE_UNDETERMINED:
6322		gdev->dev.type = &qeth_generic_devtype;
6323		break;
6324	default:
6325		card->info.layer_enforced = true;
6326		rc = qeth_core_load_discipline(card, enforced_disc);
6327		if (rc)
6328			goto err_load;
6329
6330		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
6331						card->discipline->devtype;
6332		rc = card->discipline->setup(card->gdev);
6333		if (rc)
6334			goto err_disc;
6335		break;
6336	}
6337
6338	return 0;
6339
6340err_disc:
6341	qeth_core_free_discipline(card);
6342err_load:
6343err_chp_desc:
6344	free_netdev(card->dev);
6345err_card:
6346	qeth_core_free_card(card);
6347err_dev:
6348	put_device(dev);
6349	return rc;
6350}
6351
6352static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6353{
6354	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6355
6356	QETH_CARD_TEXT(card, 2, "removedv");
6357
6358	if (card->discipline) {
6359		card->discipline->remove(gdev);
6360		qeth_core_free_discipline(card);
6361	}
6362
6363	qeth_free_qdio_queues(card);
6364
6365	free_netdev(card->dev);
6366	qeth_core_free_card(card);
6367	put_device(&gdev->dev);
6368}
6369
6370static int qeth_core_set_online(struct ccwgroup_device *gdev)
6371{
6372	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6373	int rc = 0;
6374	enum qeth_discipline_id def_discipline;
6375
6376	if (!card->discipline) {
6377		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6378						QETH_DISCIPLINE_LAYER2;
6379		rc = qeth_core_load_discipline(card, def_discipline);
6380		if (rc)
6381			goto err;
6382		rc = card->discipline->setup(card->gdev);
6383		if (rc) {
6384			qeth_core_free_discipline(card);
6385			goto err;
6386		}
6387	}
6388
6389	rc = qeth_set_online(card);
6390err:
6391	return rc;
6392}
6393
6394static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6395{
6396	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6397
6398	return qeth_set_offline(card, false);
6399}
6400
6401static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6402{
6403	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6404	qeth_set_allowed_threads(card, 0, 1);
6405	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6406		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6407	qeth_qdio_clear_card(card, 0);
6408	qeth_drain_output_queues(card);
6409	qdio_free(CARD_DDEV(card));
6410}
6411
6412static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6413			   size_t count)
6414{
6415	int err;
6416
6417	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6418				  buf);
6419
6420	return err ? err : count;
6421}
6422static DRIVER_ATTR_WO(group);
6423
6424static struct attribute *qeth_drv_attrs[] = {
6425	&driver_attr_group.attr,
6426	NULL,
6427};
6428static struct attribute_group qeth_drv_attr_group = {
6429	.attrs = qeth_drv_attrs,
6430};
6431static const struct attribute_group *qeth_drv_attr_groups[] = {
6432	&qeth_drv_attr_group,
6433	NULL,
6434};
6435
6436static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6437	.driver = {
6438		.groups = qeth_drv_attr_groups,
6439		.owner = THIS_MODULE,
6440		.name = "qeth",
6441	},
6442	.ccw_driver = &qeth_ccw_driver,
6443	.setup = qeth_core_probe_device,
6444	.remove = qeth_core_remove_device,
6445	.set_online = qeth_core_set_online,
6446	.set_offline = qeth_core_set_offline,
6447	.shutdown = qeth_core_shutdown,
6448};
6449
6450struct qeth_card *qeth_get_card_by_busid(char *bus_id)
6451{
6452	struct ccwgroup_device *gdev;
6453	struct qeth_card *card;
6454
6455	gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
6456	if (!gdev)
6457		return NULL;
6458
6459	card = dev_get_drvdata(&gdev->dev);
6460	put_device(&gdev->dev);
6461	return card;
6462}
6463EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
6464
6465int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6466{
6467	struct qeth_card *card = dev->ml_priv;
6468	struct mii_ioctl_data *mii_data;
6469	int rc = 0;
6470
6471	switch (cmd) {
6472	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6473		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
6474		break;
6475	case SIOC_QETH_GET_CARD_TYPE:
6476		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6477		    !IS_VM_NIC(card))
6478			return 1;
6479		return 0;
6480	case SIOCGMIIPHY:
6481		mii_data = if_mii(rq);
6482		mii_data->phy_id = 0;
6483		break;
6484	case SIOCGMIIREG:
6485		mii_data = if_mii(rq);
6486		if (mii_data->phy_id != 0)
6487			rc = -EINVAL;
6488		else
6489			mii_data->val_out = qeth_mdio_read(dev,
6490				mii_data->phy_id, mii_data->reg_num);
6491		break;
6492	case SIOC_QETH_QUERY_OAT:
6493		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
6494		break;
6495	default:
6496		if (card->discipline->do_ioctl)
6497			rc = card->discipline->do_ioctl(dev, rq, cmd);
6498		else
6499			rc = -EOPNOTSUPP;
6500	}
6501	if (rc)
6502		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6503	return rc;
6504}
6505EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6506
6507static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6508			      unsigned long data)
6509{
6510	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6511	u32 *features = reply->param;
6512
6513	if (qeth_setassparms_inspect_rc(cmd))
6514		return -EIO;
6515
6516	*features = cmd->data.setassparms.data.flags_32bit;
6517	return 0;
6518}
6519
6520static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6521			     enum qeth_prot_versions prot)
6522{
6523	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
6524						 NULL, prot);
6525}
6526
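/* Enable a HW checksum assist in two steps: ASS_START reports which
 * features the assist supports, ASS_ENABLE then activates the required
 * ones. If a required feature ends up unsupported or disabled, the
 * assist is switched off again.
 */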
6527static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6528			    enum qeth_prot_versions prot, u8 *lp2lp)
6529{
6530	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6531	struct qeth_cmd_buffer *iob;
6532	struct qeth_ipa_caps caps;
6533	u32 features;
6534	int rc;
6535
6536	/* some L3 HW requires combined L3+L4 csum offload: */
6537	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6538	    cstype == IPA_OUTBOUND_CHECKSUM)
6539		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6540
6541	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6542				       prot);
6543	if (!iob)
6544		return -ENOMEM;
6545
6546	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6547	if (rc)
6548		return rc;
6549
6550	if ((required_features & features) != required_features) {
6551		qeth_set_csum_off(card, cstype, prot);
6552		return -EOPNOTSUPP;
6553	}
6554
6555	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6556				       SETASS_DATA_SIZEOF(flags_32bit),
6557				       prot);
6558	if (!iob) {
6559		qeth_set_csum_off(card, cstype, prot);
6560		return -ENOMEM;
6561	}
6562
6563	if (features & QETH_IPA_CHECKSUM_LP2LP)
6564		required_features |= QETH_IPA_CHECKSUM_LP2LP;
6565	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6566	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6567	if (rc) {
6568		qeth_set_csum_off(card, cstype, prot);
6569		return rc;
6570	}
6571
6572	if (!qeth_ipa_caps_supported(&caps, required_features) ||
6573	    !qeth_ipa_caps_enabled(&caps, required_features)) {
6574		qeth_set_csum_off(card, cstype, prot);
6575		return -EOPNOTSUPP;
6576	}
6577
6578	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6579		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6580
6581	if (lp2lp)
6582		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6583
6584	return 0;
6585}
6586
6587static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6588			     enum qeth_prot_versions prot, u8 *lp2lp)
6589{
6590	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6591		    qeth_set_csum_off(card, cstype, prot);
6592}
6593
6594static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6595			     unsigned long data)
6596{
6597	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6598	struct qeth_tso_start_data *tso_data = reply->param;
6599
6600	if (qeth_setassparms_inspect_rc(cmd))
6601		return -EIO;
6602
6603	tso_data->mss = cmd->data.setassparms.data.tso.mss;
6604	tso_data->supported = cmd->data.setassparms.data.tso.supported;
6605	return 0;
6606}
6607
6608static int qeth_set_tso_off(struct qeth_card *card,
6609			    enum qeth_prot_versions prot)
6610{
6611	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6612						 IPA_CMD_ASS_STOP, NULL, prot);
6613}
6614
6615static int qeth_set_tso_on(struct qeth_card *card,
6616			   enum qeth_prot_versions prot)
6617{
6618	struct qeth_tso_start_data tso_data;
6619	struct qeth_cmd_buffer *iob;
6620	struct qeth_ipa_caps caps;
6621	int rc;
6622
6623	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6624				       IPA_CMD_ASS_START, 0, prot);
6625	if (!iob)
6626		return -ENOMEM;
6627
6628	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6629	if (rc)
6630		return rc;
6631
6632	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6633		qeth_set_tso_off(card, prot);
6634		return -EOPNOTSUPP;
6635	}
6636
6637	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6638				       IPA_CMD_ASS_ENABLE,
6639				       SETASS_DATA_SIZEOF(caps), prot);
6640	if (!iob) {
6641		qeth_set_tso_off(card, prot);
6642		return -ENOMEM;
6643	}
6644
6645	/* enable TSO capability */
6646	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6647		QETH_IPA_LARGE_SEND_TCP;
6648	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6649	if (rc) {
6650		qeth_set_tso_off(card, prot);
6651		return rc;
6652	}
6653
6654	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6655	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6656		qeth_set_tso_off(card, prot);
6657		return -EOPNOTSUPP;
6658	}
6659
6660	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6661		 tso_data.mss);
6662	return 0;
6663}
6664
6665static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6666			    enum qeth_prot_versions prot)
6667{
6668	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6669}
6670
6671static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6672{
6673	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6674	int rc_ipv6;
6675
6676	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6677		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6678					    QETH_PROT_IPV4, NULL);
6679	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6680		/* none or only one Offload Assist is available, so the rc is trivial */
6681		return rc_ipv4;
6682
6683	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6684				    QETH_PROT_IPV6, NULL);
6685
6686	if (on)
6687		/* enable: success if any Assist is active */
6688		return (rc_ipv6) ? rc_ipv4 : 0;
6689
6690	/* disable: failure if any Assist is still active */
6691	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6692}
6693
6694/**
6695 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6696 * @dev:	a net_device
6697 */
6698void qeth_enable_hw_features(struct net_device *dev)
6699{
6700	struct qeth_card *card = dev->ml_priv;
6701	netdev_features_t features;
6702
6703	features = dev->features;
6704	/* force-off any feature that might need an IPA sequence.
6705	 * netdev_update_features() will restart them.
6706	 */
6707	dev->features &= ~dev->hw_features;
6708	/* toggle VLAN filter, so that VIDs are re-programmed: */
6709	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6710		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6711		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6712	}
6713	netdev_update_features(dev);
6714	if (features != dev->features)
6715		dev_warn(&card->gdev->dev,
6716			 "Device recovery failed to restore all offload features\n");
6717}
6718EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6719
6720static void qeth_check_restricted_features(struct qeth_card *card,
6721					   netdev_features_t changed,
6722					   netdev_features_t actual)
6723{
6724	netdev_features_t ipv6_features = NETIF_F_TSO6;
6725	netdev_features_t ipv4_features = NETIF_F_TSO;
6726
6727	if (!card->info.has_lp2lp_cso_v6)
6728		ipv6_features |= NETIF_F_IPV6_CSUM;
6729	if (!card->info.has_lp2lp_cso_v4)
6730		ipv4_features |= NETIF_F_IP_CSUM;
6731
6732	if ((changed & ipv6_features) && !(actual & ipv6_features))
6733		qeth_flush_local_addrs6(card);
6734	if ((changed & ipv4_features) && !(actual & ipv4_features))
6735		qeth_flush_local_addrs4(card);
6736}
6737
6738int qeth_set_features(struct net_device *dev, netdev_features_t features)
6739{
6740	struct qeth_card *card = dev->ml_priv;
6741	netdev_features_t changed = dev->features ^ features;
6742	int rc = 0;
6743
6744	QETH_CARD_TEXT(card, 2, "setfeat");
6745	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6746
6747	if ((changed & NETIF_F_IP_CSUM)) {
6748		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6749				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6750				       &card->info.has_lp2lp_cso_v4);
6751		if (rc)
6752			changed ^= NETIF_F_IP_CSUM;
6753	}
6754	if (changed & NETIF_F_IPV6_CSUM) {
6755		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6756				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6757				       &card->info.has_lp2lp_cso_v6);
6758		if (rc)
6759			changed ^= NETIF_F_IPV6_CSUM;
6760	}
6761	if (changed & NETIF_F_RXCSUM) {
6762		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6763		if (rc)
6764			changed ^= NETIF_F_RXCSUM;
6765	}
6766	if (changed & NETIF_F_TSO) {
6767		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6768				      QETH_PROT_IPV4);
6769		if (rc)
6770			changed ^= NETIF_F_TSO;
6771	}
6772	if (changed & NETIF_F_TSO6) {
6773		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6774				      QETH_PROT_IPV6);
6775		if (rc)
6776			changed ^= NETIF_F_TSO6;
6777	}
6778
6779	qeth_check_restricted_features(card, dev->features ^ features,
6780				       dev->features ^ changed);
6781
6782	/* everything changed successfully? */
6783	if ((dev->features ^ features) == changed)
6784		return 0;
6785	/* something went wrong. save changed features and return error */
6786	dev->features ^= changed;
6787	return -EIO;
6788}
6789EXPORT_SYMBOL_GPL(qeth_set_features);
6790
6791netdev_features_t qeth_fix_features(struct net_device *dev,
6792				    netdev_features_t features)
6793{
6794	struct qeth_card *card = dev->ml_priv;
6795
6796	QETH_CARD_TEXT(card, 2, "fixfeat");
6797	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6798		features &= ~NETIF_F_IP_CSUM;
6799	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6800		features &= ~NETIF_F_IPV6_CSUM;
6801	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6802	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6803		features &= ~NETIF_F_RXCSUM;
6804	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6805		features &= ~NETIF_F_TSO;
6806	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6807		features &= ~NETIF_F_TSO6;
6808
6809	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6810	return features;
6811}
6812EXPORT_SYMBOL_GPL(qeth_fix_features);
6813
6814netdev_features_t qeth_features_check(struct sk_buff *skb,
6815				      struct net_device *dev,
6816				      netdev_features_t features)
6817{
6818	struct qeth_card *card = dev->ml_priv;
6819
6820	/* Traffic with local next-hop is not eligible for some offloads: */
6821	if (skb->ip_summed == CHECKSUM_PARTIAL &&
6822	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
6823		netdev_features_t restricted = 0;
6824
6825		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
6826			restricted |= NETIF_F_ALL_TSO;
6827
6828		switch (vlan_get_protocol(skb)) {
6829		case htons(ETH_P_IP):
6830			if (!card->info.has_lp2lp_cso_v4)
6831				restricted |= NETIF_F_IP_CSUM;
6832
6833			if (restricted && qeth_next_hop_is_local_v4(card, skb))
6834				features &= ~restricted;
6835			break;
6836		case htons(ETH_P_IPV6):
6837			if (!card->info.has_lp2lp_cso_v6)
6838				restricted |= NETIF_F_IPV6_CSUM;
6839
6840			if (restricted && qeth_next_hop_is_local_v6(card, skb))
6841				features &= ~restricted;
6842			break;
6843		default:
6844			break;
6845		}
6846	}
6847
6848	/* GSO segmentation builds skbs with
6849	 *	a (small) linear part for the headers, and
6850	 *	page frags for the data.
6851	 * Compared to a linear skb, the header-only part consumes an
6852	 * additional buffer element. This reduces buffer utilization, and
6853	 * hurts throughput. So compress small segments into one element.
6854	 */
6855	if (netif_needs_gso(skb, features)) {
6856		/* match skb_segment(): */
6857		unsigned int doffset = skb->data - skb_mac_header(skb);
6858		unsigned int hsize = skb_shinfo(skb)->gso_size;
6859		unsigned int hroom = skb_headroom(skb);
6860
6861		/* linearize only if resulting skb allocations are order-0: */
6862		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6863			features &= ~NETIF_F_SG;
6864	}
6865
6866	return vlan_features_check(skb, features);
6867}
6868EXPORT_SYMBOL_GPL(qeth_features_check);
6869
6870void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6871{
6872	struct qeth_card *card = dev->ml_priv;
6873	struct qeth_qdio_out_q *queue;
6874	unsigned int i;
6875
6876	QETH_CARD_TEXT(card, 5, "getstat");
6877
6878	stats->rx_packets = card->stats.rx_packets;
6879	stats->rx_bytes = card->stats.rx_bytes;
6880	stats->rx_errors = card->stats.rx_length_errors +
6881			   card->stats.rx_frame_errors +
6882			   card->stats.rx_fifo_errors;
6883	stats->rx_dropped = card->stats.rx_dropped_nomem +
6884			    card->stats.rx_dropped_notsupp +
6885			    card->stats.rx_dropped_runt;
6886	stats->multicast = card->stats.rx_multicast;
6887	stats->rx_length_errors = card->stats.rx_length_errors;
6888	stats->rx_frame_errors = card->stats.rx_frame_errors;
6889	stats->rx_fifo_errors = card->stats.rx_fifo_errors;
6890
6891	for (i = 0; i < card->qdio.no_out_queues; i++) {
6892		queue = card->qdio.out_qs[i];
6893
6894		stats->tx_packets += queue->stats.tx_packets;
6895		stats->tx_bytes += queue->stats.tx_bytes;
6896		stats->tx_errors += queue->stats.tx_errors;
6897		stats->tx_dropped += queue->stats.tx_dropped;
6898	}
6899}
6900EXPORT_SYMBOL_GPL(qeth_get_stats64);
6901
6902#define TC_IQD_UCAST   0
6903static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
6904				     unsigned int ucast_txqs)
6905{
6906	unsigned int prio;
6907
6908	/* IQD requires mcast traffic to be placed on a dedicated queue, and
6909	 * qeth_iqd_select_queue() deals with this.
6910	 * For unicast traffic, we defer the queue selection to the stack.
6911	 * By installing a trivial prio map that spans over only the unicast
6912	 * queues, we can encourage the stack to spread the ucast traffic evenly
6913	 * without selecting the mcast queue.
6914	 */
6915
6916	/* One traffic class, spanning over all active ucast queues: */
6917	netdev_set_num_tc(dev, 1);
6918	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
6919			    QETH_IQD_MIN_UCAST_TXQ);
6920
6921	/* Map all priorities to this traffic class: */
6922	for (prio = 0; prio <= TC_BITMASK; prio++)
6923		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
6924}
6925
6926int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
6927{
6928	struct net_device *dev = card->dev;
6929	int rc;
6930
6931	/* Per netif_setup_tc(), adjust the mapping first: */
6932	if (IS_IQD(card))
6933		qeth_iqd_set_prio_tc_map(dev, count - 1);
6934
6935	rc = netif_set_real_num_tx_queues(dev, count);
6936
6937	if (rc && IS_IQD(card))
6938		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
6939
6940	return rc;
6941}
6942
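/* IQD devices reserve one dedicated TXQ for mcast/bcast traffic, while
 * unicast is spread over the remaining queues by the stack.
 */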
6943u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6944			  u8 cast_type, struct net_device *sb_dev)
6945{
6946	u16 txq;
6947
6948	if (cast_type != RTN_UNICAST)
6949		return QETH_IQD_MCAST_TXQ;
6950	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
6951		return QETH_IQD_MIN_UCAST_TXQ;
6952
6953	txq = netdev_pick_tx(dev, skb, sb_dev);
6954	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
6955}
6956EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
6957
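/* Bring the interface up: start the TX queues and enable the RX NAPI
 * instance, plus one TX NAPI instance per output queue on IQD devices.
 */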
6958int qeth_open(struct net_device *dev)
6959{
6960	struct qeth_card *card = dev->ml_priv;
6961
6962	QETH_CARD_TEXT(card, 4, "qethopen");
6963
6964	card->data.state = CH_STATE_UP;
6965	netif_tx_start_all_queues(dev);
6966
6967	napi_enable(&card->napi);
6968	local_bh_disable();
6969	napi_schedule(&card->napi);
6970	if (IS_IQD(card)) {
6971		struct qeth_qdio_out_q *queue;
6972		unsigned int i;
6973
6974		qeth_for_each_output_queue(card, queue, i) {
6975			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
6976					  QETH_NAPI_WEIGHT);
6977			napi_enable(&queue->napi);
6978			napi_schedule(&queue->napi);
6979		}
6980	}
6981	/* kick-start the NAPI softirq: */
6982	local_bh_enable();
6983	return 0;
6984}
6985EXPORT_SYMBOL_GPL(qeth_open);
6986
6987int qeth_stop(struct net_device *dev)
6988{
6989	struct qeth_card *card = dev->ml_priv;
6990
6991	QETH_CARD_TEXT(card, 4, "qethstop");
6992	if (IS_IQD(card)) {
6993		struct qeth_qdio_out_q *queue;
6994		unsigned int i;
6995
6996		/* Quiesce the NAPI instances: */
6997		qeth_for_each_output_queue(card, queue, i)
6998			napi_disable(&queue->napi);
6999
7000		/* Stop .ndo_start_xmit, might still access queue->napi. */
7001		netif_tx_disable(dev);
7002
7003		qeth_for_each_output_queue(card, queue, i) {
7004			del_timer_sync(&queue->timer);
7005			/* Queues may get re-allocated, so remove the NAPIs. */
7006			netif_napi_del(&queue->napi);
7007		}
7008	} else {
7009		netif_tx_disable(dev);
7010	}
7011
7012	napi_disable(&card->napi);
7013	cancel_delayed_work_sync(&card->buffer_reclaim_work);
7014	qdio_stop_irq(CARD_DDEV(card));
7015
7016	return 0;
7017}
7018EXPORT_SYMBOL_GPL(qeth_stop);
7019
7020static int __init qeth_core_init(void)
7021{
7022	int rc;
7023
7024	pr_info("loading core functions\n");
7025
7026	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
7027
7028	rc = qeth_register_dbf_views();
7029	if (rc)
7030		goto dbf_err;
7031	qeth_core_root_dev = root_device_register("qeth");
7032	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
7033	if (rc)
7034		goto register_err;
7035	qeth_core_header_cache =
7036		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
7037				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
7038				  0, NULL);
7039	if (!qeth_core_header_cache) {
7040		rc = -ENOMEM;
7041		goto slab_err;
7042	}
7043	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
7044			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
7045	if (!qeth_qdio_outbuf_cache) {
7046		rc = -ENOMEM;
7047		goto cqslab_err;
7048	}
7049	rc = ccw_driver_register(&qeth_ccw_driver);
7050	if (rc)
7051		goto ccw_err;
7052	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
7053	if (rc)
7054		goto ccwgroup_err;
7055
7056	return 0;
7057
7058ccwgroup_err:
7059	ccw_driver_unregister(&qeth_ccw_driver);
7060ccw_err:
7061	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7062cqslab_err:
7063	kmem_cache_destroy(qeth_core_header_cache);
7064slab_err:
7065	root_device_unregister(qeth_core_root_dev);
7066register_err:
7067	qeth_unregister_dbf_views();
7068dbf_err:
7069	debugfs_remove_recursive(qeth_debugfs_root);
7070	pr_err("Initializing the qeth device driver failed\n");
7071	return rc;
7072}
7073
7074static void __exit qeth_core_exit(void)
7075{
7076	qeth_clear_dbf_list();
7077	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
7078	ccw_driver_unregister(&qeth_ccw_driver);
7079	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7080	kmem_cache_destroy(qeth_core_header_cache);
7081	root_device_unregister(qeth_core_root_dev);
7082	qeth_unregister_dbf_views();
7083	debugfs_remove_recursive(qeth_debugfs_root);
7084	pr_info("core functions removed\n");
7085}
7086
7087module_init(qeth_core_init);
7088module_exit(qeth_core_exit);
7089MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
7090MODULE_DESCRIPTION("qeth core functions");
7091MODULE_LICENSE("GPL");
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *    Copyright IBM Corp. 2007, 2009
   4 *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
   5 *		 Frank Pavlic <fpavlic@de.ibm.com>,
   6 *		 Thomas Spatzier <tspat@de.ibm.com>,
   7 *		 Frank Blaschka <frank.blaschka@de.ibm.com>
   8 */
   9
  10#define KMSG_COMPONENT "qeth"
  11#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  12
  13#include <linux/compat.h>
  14#include <linux/module.h>
  15#include <linux/moduleparam.h>
  16#include <linux/string.h>
  17#include <linux/errno.h>
  18#include <linux/kernel.h>
  19#include <linux/log2.h>
  20#include <linux/io.h>
  21#include <linux/ip.h>
  22#include <linux/tcp.h>
  23#include <linux/mii.h>
  24#include <linux/mm.h>
  25#include <linux/kthread.h>
  26#include <linux/slab.h>
  27#include <linux/if_vlan.h>
  28#include <linux/netdevice.h>
  29#include <linux/netdev_features.h>
  30#include <linux/rcutree.h>
  31#include <linux/skbuff.h>
  32#include <linux/vmalloc.h>
  33
  34#include <net/iucv/af_iucv.h>
  35#include <net/dsfield.h>
  36#include <net/sock.h>
  37
  38#include <asm/ebcdic.h>
  39#include <asm/chpid.h>
 
  40#include <asm/sysinfo.h>
  41#include <asm/diag.h>
  42#include <asm/cio.h>
  43#include <asm/ccwdev.h>
  44#include <asm/cpcmd.h>
  45
  46#include "qeth_core.h"
  47
  48struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
  49	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
  50	/*                   N  P  A    M  L  V                      H  */
  51	[QETH_DBF_SETUP] = {"qeth_setup",
  52				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
  53	[QETH_DBF_MSG]	 = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
  54			    &debug_sprintf_view, NULL},
  55	[QETH_DBF_CTRL]  = {"qeth_control",
  56		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
  57};
  58EXPORT_SYMBOL_GPL(qeth_dbf);
  59
  60static struct kmem_cache *qeth_core_header_cache;
 
  61static struct kmem_cache *qeth_qdio_outbuf_cache;
  62static struct kmem_cache *qeth_qaob_cache;
  63
  64static struct device *qeth_core_root_dev;
  65static struct dentry *qeth_debugfs_root;
  66static struct lock_class_key qdio_out_skb_queue_key;
  67
  68static void qeth_issue_next_read_cb(struct qeth_card *card,
  69				    struct qeth_cmd_buffer *iob,
  70				    unsigned int data_length);
  71static int qeth_qdio_establish(struct qeth_card *);
  72static void qeth_free_qdio_queues(struct qeth_card *card);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  73
  74static const char *qeth_get_cardname(struct qeth_card *card)
  75{
  76	if (IS_VM_NIC(card)) {
  77		switch (card->info.type) {
  78		case QETH_CARD_TYPE_OSD:
  79			return " Virtual NIC QDIO";
  80		case QETH_CARD_TYPE_IQD:
  81			return " Virtual NIC Hiper";
  82		case QETH_CARD_TYPE_OSM:
  83			return " Virtual NIC QDIO - OSM";
  84		case QETH_CARD_TYPE_OSX:
  85			return " Virtual NIC QDIO - OSX";
  86		default:
  87			return " unknown";
  88		}
  89	} else {
  90		switch (card->info.type) {
  91		case QETH_CARD_TYPE_OSD:
  92			return " OSD Express";
  93		case QETH_CARD_TYPE_IQD:
  94			return " HiperSockets";
 
 
  95		case QETH_CARD_TYPE_OSM:
  96			return " OSM QDIO";
  97		case QETH_CARD_TYPE_OSX:
  98			return " OSX QDIO";
  99		default:
 100			return " unknown";
 101		}
 102	}
 103	return " n/a";
 104}
 105
 106/* max length to be returned: 14 */
 107const char *qeth_get_cardname_short(struct qeth_card *card)
 108{
 109	if (IS_VM_NIC(card)) {
 110		switch (card->info.type) {
 111		case QETH_CARD_TYPE_OSD:
 112			return "Virt.NIC QDIO";
 113		case QETH_CARD_TYPE_IQD:
 114			return "Virt.NIC Hiper";
 115		case QETH_CARD_TYPE_OSM:
 116			return "Virt.NIC OSM";
 117		case QETH_CARD_TYPE_OSX:
 118			return "Virt.NIC OSX";
 119		default:
 120			return "unknown";
 121		}
 122	} else {
 123		switch (card->info.type) {
 124		case QETH_CARD_TYPE_OSD:
 125			switch (card->info.link_type) {
 126			case QETH_LINK_TYPE_FAST_ETH:
 127				return "OSD_100";
 128			case QETH_LINK_TYPE_HSTR:
 129				return "HSTR";
 130			case QETH_LINK_TYPE_GBIT_ETH:
 131				return "OSD_1000";
 132			case QETH_LINK_TYPE_10GBIT_ETH:
 133				return "OSD_10GIG";
 134			case QETH_LINK_TYPE_25GBIT_ETH:
 135				return "OSD_25GIG";
 136			case QETH_LINK_TYPE_LANE_ETH100:
 137				return "OSD_FE_LANE";
 138			case QETH_LINK_TYPE_LANE_TR:
 139				return "OSD_TR_LANE";
 140			case QETH_LINK_TYPE_LANE_ETH1000:
 141				return "OSD_GbE_LANE";
 142			case QETH_LINK_TYPE_LANE:
 143				return "OSD_ATM_LANE";
 144			default:
 145				return "OSD_Express";
 146			}
 147		case QETH_CARD_TYPE_IQD:
 148			return "HiperSockets";
 
 
 149		case QETH_CARD_TYPE_OSM:
 150			return "OSM_1000";
 151		case QETH_CARD_TYPE_OSX:
 152			return "OSX_10GIG";
 153		default:
 154			return "unknown";
 155		}
 156	}
 157	return "n/a";
 158}
 159
 160void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
 161			 int clear_start_mask)
 162{
 163	unsigned long flags;
 164
 165	spin_lock_irqsave(&card->thread_mask_lock, flags);
 166	card->thread_allowed_mask = threads;
 167	if (clear_start_mask)
 168		card->thread_start_mask &= threads;
 169	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 170	wake_up(&card->wait_q);
 171}
 172EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
 173
 174int qeth_threads_running(struct qeth_card *card, unsigned long threads)
 175{
 176	unsigned long flags;
 177	int rc = 0;
 178
 179	spin_lock_irqsave(&card->thread_mask_lock, flags);
 180	rc = (card->thread_running_mask & threads);
 181	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
 182	return rc;
 183}
 184EXPORT_SYMBOL_GPL(qeth_threads_running);
 185
 186static void qeth_clear_working_pool_list(struct qeth_card *card)
 187{
 188	struct qeth_buffer_pool_entry *pool_entry, *tmp;
 189	struct qeth_qdio_q *queue = card->qdio.in_q;
 190	unsigned int i;
 191
 192	QETH_CARD_TEXT(card, 5, "clwrklst");
 193	list_for_each_entry_safe(pool_entry, tmp,
 194				 &card->qdio.in_buf_pool.entry_list, list)
 195		list_del(&pool_entry->list);
 
 196
 197	for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
 198		queue->bufs[i].pool_entry = NULL;
 199}
 
 200
 201static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
 202{
 203	unsigned int i;
 204
 205	for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
 206		if (entry->elements[i])
 207			__free_page(entry->elements[i]);
 208	}
 209
 210	kfree(entry);
 211}
 212
 213static void qeth_free_buffer_pool(struct qeth_card *card)
 214{
 215	struct qeth_buffer_pool_entry *entry, *tmp;
 216
 217	list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
 218				 init_list) {
 219		list_del(&entry->init_list);
 220		qeth_free_pool_entry(entry);
 221	}
 222}
 223
 224static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
 225{
 226	struct qeth_buffer_pool_entry *entry;
 227	unsigned int i;
 228
 229	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 230	if (!entry)
 231		return NULL;
 232
 233	for (i = 0; i < pages; i++) {
 234		entry->elements[i] = __dev_alloc_page(GFP_KERNEL);
 235
 236		if (!entry->elements[i]) {
 237			qeth_free_pool_entry(entry);
 238			return NULL;
 239		}
 240	}
 241
 242	return entry;
 243}
 244
 245static int qeth_alloc_buffer_pool(struct qeth_card *card)
 246{
 247	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
 248	unsigned int i;
 249
 250	QETH_CARD_TEXT(card, 5, "alocpool");
 251	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
 252		struct qeth_buffer_pool_entry *entry;
 253
 254		entry = qeth_alloc_pool_entry(buf_elements);
 255		if (!entry) {
 256			qeth_free_buffer_pool(card);
 257			return -ENOMEM;
 258		}
 259
 260		list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
 261	}
 262	return 0;
 263}
 264
 265int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
 266{
 267	unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
 268	struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
 269	struct qeth_buffer_pool_entry *entry, *tmp;
 270	int delta = count - pool->buf_count;
 271	LIST_HEAD(entries);
 272
 273	QETH_CARD_TEXT(card, 2, "realcbp");
 274
 275	/* Defer until pool is allocated: */
 276	if (list_empty(&pool->entry_list))
 277		goto out;
 278
 279	/* Remove entries from the pool: */
 280	while (delta < 0) {
 281		entry = list_first_entry(&pool->entry_list,
 282					 struct qeth_buffer_pool_entry,
 283					 init_list);
 284		list_del(&entry->init_list);
 285		qeth_free_pool_entry(entry);
 286
 287		delta++;
 288	}
 289
 290	/* Allocate additional entries: */
 291	while (delta > 0) {
 292		entry = qeth_alloc_pool_entry(buf_elements);
 293		if (!entry) {
 294			list_for_each_entry_safe(entry, tmp, &entries,
 295						 init_list) {
 296				list_del(&entry->init_list);
 297				qeth_free_pool_entry(entry);
 298			}
 299
 300			return -ENOMEM;
 301		}
 302
 303		list_add(&entry->init_list, &entries);
 304
 305		delta--;
 306	}
 307
 308	list_splice(&entries, &pool->entry_list);
 309
 310out:
 311	card->qdio.in_buf_pool.buf_count = count;
 312	pool->buf_count = count;
 313	return 0;
 314}
 315EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
 316
 317static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
 318{
 319	if (!q)
 320		return;
 321
 322	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
 323	kfree(q);
 324}
 325
 326static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
 327{
 328	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
 329	int i;
 330
 331	if (!q)
 332		return NULL;
 333
 334	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
 335		kfree(q);
 336		return NULL;
 337	}
 338
 339	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
 340		q->bufs[i].buffer = q->qdio_bufs[i];
 341
 342	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
 343	return q;
 344}
 345
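/* When the completion queue is enabled, reset its buffers and hand them
 * to the device as an additional input queue.
 */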
 346static int qeth_cq_init(struct qeth_card *card)
 347{
 348	int rc;
 349
 350	if (card->options.cq == QETH_CQ_ENABLED) {
 351		QETH_CARD_TEXT(card, 2, "cqinit");
 352		qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
 353				   QDIO_MAX_BUFFERS_PER_Q);
 354		card->qdio.c_q->next_buf_to_init = 127;
 355
 356		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 1, 0, 127);
 357		if (rc) {
 358			QETH_CARD_TEXT_(card, 2, "1err%d", rc);
 359			goto out;
 360		}
 361	}
 362	rc = 0;
 363out:
 364	return rc;
 365}
 366
 367static int qeth_alloc_cq(struct qeth_card *card)
 368{
 369	if (card->options.cq == QETH_CQ_ENABLED) {
 370		QETH_CARD_TEXT(card, 2, "cqon");
 371		card->qdio.c_q = qeth_alloc_qdio_queue();
 372		if (!card->qdio.c_q) {
 373			dev_err(&card->gdev->dev, "Failed to create completion queue\n");
 374			return -ENOMEM;
 375		}
 376	} else {
 377		QETH_CARD_TEXT(card, 2, "nocq");
 378		card->qdio.c_q = NULL;
 379	}
 380	return 0;
 381}
 382
 383static void qeth_free_cq(struct qeth_card *card)
 384{
 385	if (card->qdio.c_q) {
 386		qeth_free_qdio_queue(card->qdio.c_q);
 387		card->qdio.c_q = NULL;
 388	}
 389}
 390
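/* Map the completion code in SBAL flag 15 to an AF_IUCV TX notification:
 * 0 signals success, 4/16/17/18 an unreachable peer, anything else a
 * general error.
 */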
 391static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
 392							int delayed)
 393{
 394	enum iucv_tx_notify n;
 395
 396	switch (sbalf15) {
 397	case 0:
 398		n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
 399		break;
 400	case 4:
 401	case 16:
 402	case 17:
 403	case 18:
 404		n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
 405			TX_NOTIFY_UNREACHABLE;
 406		break;
 407	default:
 408		n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
 409			TX_NOTIFY_GENERALERROR;
 410		break;
 411	}
 412
 413	return n;
 414}
 415
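/* Drop one reference to the cmd buffer, freeing it on the final put. */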
 416static void qeth_put_cmd(struct qeth_cmd_buffer *iob)
 417{
 418	if (refcount_dec_and_test(&iob->ref_count)) {
 419		kfree(iob->data);
 420		kfree(iob);
 421	}
 422}
 
 423static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
 424			   void *data)
 425{
 426	ccw->cmd_code = cmd_code;
 427	ccw->flags = flags | CCW_FLAG_SLI;
 428	ccw->count = len;
 429	ccw->cda = (__u32)virt_to_phys(data);
 430}
 431
 432static int __qeth_issue_next_read(struct qeth_card *card)
 433{
 434	struct qeth_cmd_buffer *iob = card->read_cmd;
 435	struct qeth_channel *channel = iob->channel;
 436	struct ccw1 *ccw = __ccw_from_cmd(iob);
 437	int rc;
 438
 439	QETH_CARD_TEXT(card, 5, "issnxrd");
 440	if (channel->state != CH_STATE_UP)
 441		return -EIO;
 442
 443	memset(iob->data, 0, iob->length);
 444	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
 445	iob->callback = qeth_issue_next_read_cb;
 446	/* keep the cmd alive after completion: */
 447	qeth_get_cmd(iob);
 448
 449	QETH_CARD_TEXT(card, 6, "noirqpnd");
 450	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
 451	if (!rc) {
 452		channel->active_cmd = iob;
 453	} else {
 454		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
 455				 rc, CARD_DEVID(card));
 456		qeth_unlock_channel(card, channel);
 457		qeth_put_cmd(iob);
 458		card->read_or_write_problem = 1;
 459		qeth_schedule_recovery(card);
 460	}
 461	return rc;
 462}
 463
 464static int qeth_issue_next_read(struct qeth_card *card)
 465{
 466	int ret;
 467
 468	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
 469	ret = __qeth_issue_next_read(card);
 470	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
 471
 472	return ret;
 473}
 474
 475static void qeth_enqueue_cmd(struct qeth_card *card,
 476			     struct qeth_cmd_buffer *iob)
 477{
 478	spin_lock_irq(&card->lock);
 479	list_add_tail(&iob->list_entry, &card->cmd_waiter_list);
 480	spin_unlock_irq(&card->lock);
 481}
 482
 483static void qeth_dequeue_cmd(struct qeth_card *card,
 484			     struct qeth_cmd_buffer *iob)
 485{
 486	spin_lock_irq(&card->lock);
 487	list_del(&iob->list_entry);
 488	spin_unlock_irq(&card->lock);
 489}
 490
 491static void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
 492{
 493	iob->rc = reason;
 494	complete(&iob->done);
 495}
 496
 497static void qeth_flush_local_addrs4(struct qeth_card *card)
 498{
 499	struct qeth_local_addr *addr;
 500	struct hlist_node *tmp;
 501	unsigned int i;
 502
 503	spin_lock_irq(&card->local_addrs4_lock);
 504	hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) {
 505		hash_del_rcu(&addr->hnode);
 506		kfree_rcu(addr, rcu);
 507	}
 508	spin_unlock_irq(&card->local_addrs4_lock);
 509}
 510
 511static void qeth_flush_local_addrs6(struct qeth_card *card)
 512{
 513	struct qeth_local_addr *addr;
 514	struct hlist_node *tmp;
 515	unsigned int i;
 516
 517	spin_lock_irq(&card->local_addrs6_lock);
 518	hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) {
 519		hash_del_rcu(&addr->hnode);
 520		kfree_rcu(addr, rcu);
 521	}
 522	spin_unlock_irq(&card->local_addrs6_lock);
 523}
 524
 525static void qeth_flush_local_addrs(struct qeth_card *card)
 526{
 527	qeth_flush_local_addrs4(card);
 528	qeth_flush_local_addrs6(card);
 529}
 530
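/* Track an IPv4 address announced by the device, embedding it in the low
 * 32 bits of an in6_addr so that the v4 and v6 tables share one entry
 * format. Duplicates are skipped.
 */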
 531static void qeth_add_local_addrs4(struct qeth_card *card,
 532				  struct qeth_ipacmd_local_addrs4 *cmd)
 533{
 534	unsigned int i;
 535
 536	if (cmd->addr_length !=
 537	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
 538		dev_err_ratelimited(&card->gdev->dev,
 539				    "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n",
 540				    cmd->addr_length);
 541		return;
 542	}
 543
 544	spin_lock(&card->local_addrs4_lock);
 545	for (i = 0; i < cmd->count; i++) {
 546		unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr);
 547		struct qeth_local_addr *addr;
 548		bool duplicate = false;
 549
 550		hash_for_each_possible(card->local_addrs4, addr, hnode, key) {
 551			if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) {
 552				duplicate = true;
 553				break;
 554			}
 555		}
 556
 557		if (duplicate)
 558			continue;
 559
 560		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
 561		if (!addr) {
 562			dev_err(&card->gdev->dev,
 563				"Failed to allocate local addr object. Traffic to %pI4 might suffer.\n",
 564				&cmd->addrs[i].addr);
 565			continue;
 566		}
 567
 568		ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr);
 569		hash_add_rcu(card->local_addrs4, &addr->hnode, key);
 570	}
 571	spin_unlock(&card->local_addrs4_lock);
 572}
 573
 574static void qeth_add_local_addrs6(struct qeth_card *card,
 575				  struct qeth_ipacmd_local_addrs6 *cmd)
 576{
 577	unsigned int i;
 578
 579	if (cmd->addr_length !=
 580	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
 581		dev_err_ratelimited(&card->gdev->dev,
 582				    "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n",
 583				    cmd->addr_length);
 584		return;
 585	}
 586
 587	spin_lock(&card->local_addrs6_lock);
 588	for (i = 0; i < cmd->count; i++) {
 589		u32 key = ipv6_addr_hash(&cmd->addrs[i].addr);
 590		struct qeth_local_addr *addr;
 591		bool duplicate = false;
 592
 593		hash_for_each_possible(card->local_addrs6, addr, hnode, key) {
 594			if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) {
 595				duplicate = true;
 596				break;
 597			}
 598		}
 599
 600		if (duplicate)
 601			continue;
 602
 603		addr = kmalloc(sizeof(*addr), GFP_ATOMIC);
 604		if (!addr) {
 605			dev_err(&card->gdev->dev,
 606				"Failed to allocate local addr object. Traffic to %pI6c might suffer.\n",
 607				&cmd->addrs[i].addr);
 608			continue;
 609		}
 610
 611		addr->addr = cmd->addrs[i].addr;
 612		hash_add_rcu(card->local_addrs6, &addr->hnode, key);
 613	}
 614	spin_unlock(&card->local_addrs6_lock);
 615}
 616
 617static void qeth_del_local_addrs4(struct qeth_card *card,
 618				  struct qeth_ipacmd_local_addrs4 *cmd)
 619{
 620	unsigned int i;
 621
 622	if (cmd->addr_length !=
 623	    sizeof_field(struct qeth_ipacmd_local_addr4, addr)) {
 624		dev_err_ratelimited(&card->gdev->dev,
 625				    "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n",
 626				    cmd->addr_length);
 627		return;
 628	}
 629
 630	spin_lock(&card->local_addrs4_lock);
 631	for (i = 0; i < cmd->count; i++) {
 632		struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i];
 633		unsigned int key = ipv4_addr_hash(addr->addr);
 634		struct qeth_local_addr *tmp;
 635
 636		hash_for_each_possible(card->local_addrs4, tmp, hnode, key) {
 637			if (tmp->addr.s6_addr32[3] == addr->addr) {
 638				hash_del_rcu(&tmp->hnode);
 639				kfree_rcu(tmp, rcu);
 640				break;
 641			}
 642		}
 643	}
 644	spin_unlock(&card->local_addrs4_lock);
 645}
 646
 647static void qeth_del_local_addrs6(struct qeth_card *card,
 648				  struct qeth_ipacmd_local_addrs6 *cmd)
 649{
 650	unsigned int i;
 651
 652	if (cmd->addr_length !=
 653	    sizeof_field(struct qeth_ipacmd_local_addr6, addr)) {
 654		dev_err_ratelimited(&card->gdev->dev,
 655				    "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n",
 656				    cmd->addr_length);
 657		return;
 658	}
 659
 660	spin_lock(&card->local_addrs6_lock);
 661	for (i = 0; i < cmd->count; i++) {
 662		struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i];
 663		u32 key = ipv6_addr_hash(&addr->addr);
 664		struct qeth_local_addr *tmp;
 665
 666		hash_for_each_possible(card->local_addrs6, tmp, hnode, key) {
 667			if (ipv6_addr_equal(&tmp->addr, &addr->addr)) {
 668				hash_del_rcu(&tmp->hnode);
 669				kfree_rcu(tmp, rcu);
 670				break;
 671			}
 672		}
 673	}
 674	spin_unlock(&card->local_addrs6_lock);
 675}
 676
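/* Check under RCU whether the skb's IPv4 next hop is one of the addresses
 * learned through REGISTER_LOCAL_ADDR events.
 */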
 677static bool qeth_next_hop_is_local_v4(struct qeth_card *card,
 678				      struct sk_buff *skb)
 679{
 680	struct qeth_local_addr *tmp;
 681	bool is_local = false;
 682	unsigned int key;
 683	__be32 next_hop;
 684
 685	if (hash_empty(card->local_addrs4))
 686		return false;
 687
 688	rcu_read_lock();
 689	next_hop = qeth_next_hop_v4_rcu(skb,
 690					qeth_dst_check_rcu(skb, htons(ETH_P_IP)));
 691	key = ipv4_addr_hash(next_hop);
 692
 693	hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) {
 694		if (tmp->addr.s6_addr32[3] == next_hop) {
 695			is_local = true;
 696			break;
 697		}
 698	}
 699	rcu_read_unlock();
 700
 701	return is_local;
 702}
 703
 704static bool qeth_next_hop_is_local_v6(struct qeth_card *card,
 705				      struct sk_buff *skb)
 706{
 707	struct qeth_local_addr *tmp;
 708	struct in6_addr *next_hop;
 709	bool is_local = false;
 710	u32 key;
 711
 712	if (hash_empty(card->local_addrs6))
 713		return false;
 714
 715	rcu_read_lock();
 716	next_hop = qeth_next_hop_v6_rcu(skb,
 717					qeth_dst_check_rcu(skb, htons(ETH_P_IPV6)));
 718	key = ipv6_addr_hash(next_hop);
 719
 720	hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) {
 721		if (ipv6_addr_equal(&tmp->addr, next_hop)) {
 722			is_local = true;
 723			break;
 724		}
 725	}
 726	rcu_read_unlock();
 727
 728	return is_local;
 729}
 730
 731static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v)
 732{
 733	struct qeth_card *card = m->private;
 734	struct qeth_local_addr *tmp;
 735	unsigned int i;
 736
 737	rcu_read_lock();
 738	hash_for_each_rcu(card->local_addrs4, i, tmp, hnode)
 739		seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]);
 740	hash_for_each_rcu(card->local_addrs6, i, tmp, hnode)
 741		seq_printf(m, "%pI6c\n", &tmp->addr);
 742	rcu_read_unlock();
 743
 744	return 0;
 745}
 746
 747DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr);
 748
 749static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
 750		struct qeth_card *card)
 751{
 752	const char *ipa_name;
 753	int com = cmd->hdr.command;
 754
 755	ipa_name = qeth_get_ipa_cmd_name(com);
 756
 757	if (rc)
 758		QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
 759				 ipa_name, com, CARD_DEVID(card), rc,
 760				 qeth_get_ipa_msg(rc));
 761	else
 762		QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
 763				 ipa_name, com, CARD_DEVID(card));
 764}
 765
 766static void qeth_default_link_info(struct qeth_card *card)
 767{
 768	struct qeth_link_info *link_info = &card->info.link_info;
 769
 770	QETH_CARD_TEXT(card, 2, "dftlinfo");
 771	link_info->duplex = DUPLEX_FULL;
 772
 773	if (IS_IQD(card) || IS_VM_NIC(card)) {
 774		link_info->speed = SPEED_10000;
 775		link_info->port = PORT_FIBRE;
 776		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
 777	} else {
 778		switch (card->info.link_type) {
 779		case QETH_LINK_TYPE_FAST_ETH:
 780		case QETH_LINK_TYPE_LANE_ETH100:
 781			link_info->speed = SPEED_100;
 782			link_info->port = PORT_TP;
 783			break;
 784		case QETH_LINK_TYPE_GBIT_ETH:
 785		case QETH_LINK_TYPE_LANE_ETH1000:
 786			link_info->speed = SPEED_1000;
 787			link_info->port = PORT_FIBRE;
 788			break;
 789		case QETH_LINK_TYPE_10GBIT_ETH:
 790			link_info->speed = SPEED_10000;
 791			link_info->port = PORT_FIBRE;
 792			break;
 793		case QETH_LINK_TYPE_25GBIT_ETH:
 794			link_info->speed = SPEED_25000;
 795			link_info->port = PORT_FIBRE;
 796			break;
 797		default:
 798			dev_info(&card->gdev->dev,
 799				 "Unknown link type %x\n",
 800				 card->info.link_type);
 801			link_info->speed = SPEED_UNKNOWN;
 802			link_info->port = PORT_OTHER;
 803		}
 804
 805		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
 806	}
 807}
 808
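/* Inspect received IPA data: solicited replies are handed back to the
 * caller for cmd matching, while unsolicited events (link changes, bridge
 * port and local-address notifications) are consumed here and NULL is
 * returned.
 */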
 809static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
 810						struct qeth_ipa_cmd *cmd)
 811{
 812	QETH_CARD_TEXT(card, 5, "chkipad");
 813
 814	if (IS_IPA_REPLY(cmd)) {
 815		if (cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
 816			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
 817		return cmd;
 818	}
 819
 820	/* handle unsolicited event: */
 821	switch (cmd->hdr.command) {
 822	case IPA_CMD_STOPLAN:
 823		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
 824			dev_err(&card->gdev->dev,
 825				"Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
 826				netdev_name(card->dev));
 827			/* Set offline, then probably fail to set online: */
 828			qeth_schedule_recovery(card);
 829		} else {
 830			/* stay online for subsequent STARTLAN */
 831			dev_warn(&card->gdev->dev,
 832				 "The link for interface %s on CHPID 0x%X failed\n",
 833				 netdev_name(card->dev), card->info.chpid);
 834			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
 835			netif_carrier_off(card->dev);
 836			qeth_default_link_info(card);
 837		}
 838		return NULL;
 839	case IPA_CMD_STARTLAN:
 840		dev_info(&card->gdev->dev,
 841			 "The link for %s on CHPID 0x%X has been restored\n",
 842			 netdev_name(card->dev), card->info.chpid);
 843		if (card->info.hwtrap)
 844			card->info.hwtrap = 2;
 845		qeth_schedule_recovery(card);
 846		return NULL;
 847	case IPA_CMD_SETBRIDGEPORT_IQD:
 848	case IPA_CMD_SETBRIDGEPORT_OSA:
 849	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
 850		if (card->discipline->control_event_handler(card, cmd))
 851			return cmd;
 852		return NULL;
 853	case IPA_CMD_REGISTER_LOCAL_ADDR:
 854		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
 855			qeth_add_local_addrs4(card, &cmd->data.local_addrs4);
 856		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
 857			qeth_add_local_addrs6(card, &cmd->data.local_addrs6);
 858
 859		QETH_CARD_TEXT(card, 3, "irla");
 860		return NULL;
 861	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
 862		if (cmd->hdr.prot_version == QETH_PROT_IPV4)
 863			qeth_del_local_addrs4(card, &cmd->data.local_addrs4);
 864		else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
 865			qeth_del_local_addrs6(card, &cmd->data.local_addrs6);
 866
 867		QETH_CARD_TEXT(card, 3, "urla");
 868		return NULL;
 869	default:
 870		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
 871		return cmd;
 872	}
 873}
 874
 875static void qeth_clear_ipacmd_list(struct qeth_card *card)
 876{
 877	struct qeth_cmd_buffer *iob;
 878	unsigned long flags;
 879
 880	QETH_CARD_TEXT(card, 4, "clipalst");
 881
 882	spin_lock_irqsave(&card->lock, flags);
 883	list_for_each_entry(iob, &card->cmd_waiter_list, list_entry)
 884		qeth_notify_cmd(iob, -ECANCELED);
 885	spin_unlock_irqrestore(&card->lock, flags);
 886}
 887
 888static int qeth_check_idx_response(struct qeth_card *card,
 889	unsigned char *buffer)
 890{
 891	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
 892	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
 893		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
 894				 buffer[4]);
 895		QETH_CARD_TEXT(card, 2, "ckidxres");
 896		QETH_CARD_TEXT(card, 2, " idxterm");
 897		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
 898		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
 899		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
 900			dev_err(&card->gdev->dev,
 901				"The device does not support the configured transport mode\n");
 902			return -EPROTONOSUPPORT;
 903		}
 904		return -EIO;
 905	}
 906	return 0;
 907}
 908
 909static void qeth_release_buffer_cb(struct qeth_card *card,
 910				   struct qeth_cmd_buffer *iob,
 911				   unsigned int data_length)
 912{
 913	qeth_put_cmd(iob);
 914}
 915
 916static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
 917{
 918	qeth_notify_cmd(iob, rc);
 919	qeth_put_cmd(iob);
 920}
 921
 922static struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
 923					      unsigned int length,
 924					      unsigned int ccws, long timeout)
 925{
 926	struct qeth_cmd_buffer *iob;
 927
 928	if (length > QETH_BUFSIZE)
 929		return NULL;
 930
 931	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
 932	if (!iob)
 933		return NULL;
 934
 935	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
 936			    GFP_KERNEL | GFP_DMA);
 937	if (!iob->data) {
 938		kfree(iob);
 939		return NULL;
 940	}
 941
 942	init_completion(&iob->done);
 943	spin_lock_init(&iob->lock);
 944	refcount_set(&iob->ref_count, 1);
 945	iob->channel = channel;
 946	iob->timeout = timeout;
 947	iob->length = length;
 948	return iob;
 949}
 950
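/* Completion callback for the long-running READ: check the IDX response,
 * filter out unsolicited events, match the reply against the waiting
 * cmds and finally re-arm the next read.
 */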
 951static void qeth_issue_next_read_cb(struct qeth_card *card,
 952				    struct qeth_cmd_buffer *iob,
 953				    unsigned int data_length)
 954{
 955	struct qeth_cmd_buffer *request = NULL;
 956	struct qeth_ipa_cmd *cmd = NULL;
 957	struct qeth_reply *reply = NULL;
 958	struct qeth_cmd_buffer *tmp;
 959	unsigned long flags;
 960	int rc = 0;
 961
 962	QETH_CARD_TEXT(card, 4, "sndctlcb");
 963	rc = qeth_check_idx_response(card, iob->data);
 964	switch (rc) {
 965	case 0:
 966		break;
 967	case -EIO:
 968		qeth_schedule_recovery(card);
 969		fallthrough;
 970	default:
 971		qeth_clear_ipacmd_list(card);
 972		goto err_idx;
 973	}
 974
 975	cmd = __ipa_reply(iob);
 976	if (cmd) {
 977		cmd = qeth_check_ipa_data(card, cmd);
 978		if (!cmd)
 979			goto out;
 980	}
 981
 982	/* match against pending cmd requests */
 983	spin_lock_irqsave(&card->lock, flags);
 984	list_for_each_entry(tmp, &card->cmd_waiter_list, list_entry) {
 985		if (tmp->match && tmp->match(tmp, iob)) {
 986			request = tmp;
 987			/* take the object outside the lock */
 988			qeth_get_cmd(request);
 989			break;
 990		}
 991	}
 992	spin_unlock_irqrestore(&card->lock, flags);
 993
 994	if (!request)
 995		goto out;
 996
 997	reply = &request->reply;
 998	if (!reply->callback) {
 999		rc = 0;
1000		goto no_callback;
1001	}
1002
1003	spin_lock_irqsave(&request->lock, flags);
1004	if (request->rc)
1005		/* Bail out when the requestor has already left: */
1006		rc = request->rc;
1007	else
1008		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
1009							(unsigned long)iob);
1010	spin_unlock_irqrestore(&request->lock, flags);
1011
1012no_callback:
1013	if (rc <= 0)
1014		qeth_notify_cmd(request, rc);
1015	qeth_put_cmd(request);
1016out:
1017	memcpy(&card->seqno.pdu_hdr_ack,
1018		QETH_PDU_HEADER_SEQ_NO(iob->data),
1019		QETH_SEQ_NO_LENGTH);
1020	__qeth_issue_next_read(card);
1021err_idx:
1022	qeth_put_cmd(iob);
1023}
1024
1025static int qeth_set_thread_start_bit(struct qeth_card *card,
1026		unsigned long thread)
1027{
1028	unsigned long flags;
1029	int rc = 0;
1030
1031	spin_lock_irqsave(&card->thread_mask_lock, flags);
1032	if (!(card->thread_allowed_mask & thread))
1033		rc = -EPERM;
1034	else if (card->thread_start_mask & thread)
1035		rc = -EBUSY;
1036	else
1037		card->thread_start_mask |= thread;
1038	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1039
1040	return rc;
1041}
1042
1043static void qeth_clear_thread_start_bit(struct qeth_card *card,
1044					unsigned long thread)
1045{
1046	unsigned long flags;
1047
1048	spin_lock_irqsave(&card->thread_mask_lock, flags);
1049	card->thread_start_mask &= ~thread;
1050	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1051	wake_up(&card->wait_q);
1052}
1053
1054static void qeth_clear_thread_running_bit(struct qeth_card *card,
1055					  unsigned long thread)
1056{
1057	unsigned long flags;
1058
1059	spin_lock_irqsave(&card->thread_mask_lock, flags);
1060	card->thread_running_mask &= ~thread;
1061	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1062	wake_up_all(&card->wait_q);
1063}
1064
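/* Return 1 and move @thread from the start mask to the running mask when
 * it may run now, 0 when it was not requested, and -EPERM when it is
 * requested but not allowed or already running.
 */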
1065static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1066{
1067	unsigned long flags;
1068	int rc = 0;
1069
1070	spin_lock_irqsave(&card->thread_mask_lock, flags);
1071	if (card->thread_start_mask & thread) {
1072		if ((card->thread_allowed_mask & thread) &&
1073		    !(card->thread_running_mask & thread)) {
1074			rc = 1;
1075			card->thread_start_mask &= ~thread;
1076			card->thread_running_mask |= thread;
1077		} else
1078			rc = -EPERM;
1079	}
1080	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1081	return rc;
1082}
1083
1084static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
1085{
1086	int rc = 0;
1087
1088	wait_event(card->wait_q,
1089		   (rc = __qeth_do_run_thread(card, thread)) >= 0);
1090	return rc;
1091}
1092
1093int qeth_schedule_recovery(struct qeth_card *card)
1094{
1095	int rc;
1096
1097	QETH_CARD_TEXT(card, 2, "startrec");
1098
1099	rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD);
1100	if (!rc)
1101		schedule_work(&card->kernel_thread_starter);
1102
1103	return rc;
1104}
1105
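/* Evaluate the channel and device status in an IRB, and decide whether
 * the condition is fatal (-EIO) or can be tolerated.
 */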
1106static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
1107			    struct irb *irb)
1108{
1109	int dstat, cstat;
1110	char *sense;
1111
1112	sense = (char *) irb->ecw;
1113	cstat = irb->scsw.cmd.cstat;
1114	dstat = irb->scsw.cmd.dstat;
1115
1116	if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
1117		     SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
1118		     SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
1119		QETH_CARD_TEXT(card, 2, "CGENCHK");
1120		dev_warn(&cdev->dev, "The qeth device driver "
1121			"failed to recover an error on the device\n");
1122		QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
1123				 CCW_DEVID(cdev), dstat, cstat);
1124		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
1125				16, 1, irb, 64, 1);
1126		return -EIO;
1127	}
1128
1129	if (dstat & DEV_STAT_UNIT_CHECK) {
1130		if (sense[SENSE_RESETTING_EVENT_BYTE] &
1131		    SENSE_RESETTING_EVENT_FLAG) {
1132			QETH_CARD_TEXT(card, 2, "REVIND");
1133			return -EIO;
1134		}
1135		if (sense[SENSE_COMMAND_REJECT_BYTE] &
1136		    SENSE_COMMAND_REJECT_FLAG) {
1137			QETH_CARD_TEXT(card, 2, "CMDREJi");
1138			return -EIO;
1139		}
1140		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
1141			QETH_CARD_TEXT(card, 2, "AFFE");
1142			return -EIO;
1143		}
1144		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
1145			QETH_CARD_TEXT(card, 2, "ZEROSEN");
1146			return 0;
1147		}
1148		QETH_CARD_TEXT(card, 2, "DGENCHK");
1149		return -EIO;
1150	}
1151	return 0;
1152}
1153
1154static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
1155				struct irb *irb)
1156{
1157	if (!IS_ERR(irb))
1158		return 0;
1159
1160	switch (PTR_ERR(irb)) {
1161	case -EIO:
1162		QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
1163				 CCW_DEVID(cdev));
1164		QETH_CARD_TEXT(card, 2, "ckirberr");
1165		QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
1166		return -EIO;
1167	case -ETIMEDOUT:
1168		dev_warn(&cdev->dev, "A hardware operation timed out"
1169			" on the device\n");
1170		QETH_CARD_TEXT(card, 2, "ckirberr");
1171		QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
1172		return -ETIMEDOUT;
1173	default:
1174		QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
1175				 PTR_ERR(irb), CCW_DEVID(cdev));
1176		QETH_CARD_TEXT(card, 2, "ckirberr");
1177		QETH_CARD_TEXT(card, 2, "  rc???");
1178		return PTR_ERR(irb);
1179	}
1180}
1181
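/* Interrupt handler for all three CCW channels: attribute the interrupt
 * to its channel, check for error conditions and eventually invoke the
 * callback of the completed cmd buffer.
 */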
1182static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1183		struct irb *irb)
1184{
1185	int rc;
1186	int cstat, dstat;
1187	struct qeth_cmd_buffer *iob = NULL;
1188	struct ccwgroup_device *gdev;
1189	struct qeth_channel *channel;
1190	struct qeth_card *card;
1191
1192	/* while we hold the ccwdev lock, this stays valid: */
1193	gdev = dev_get_drvdata(&cdev->dev);
1194	card = dev_get_drvdata(&gdev->dev);
1195
1196	QETH_CARD_TEXT(card, 5, "irq");
1197
1198	if (card->read.ccwdev == cdev) {
1199		channel = &card->read;
1200		QETH_CARD_TEXT(card, 5, "read");
1201	} else if (card->write.ccwdev == cdev) {
1202		channel = &card->write;
1203		QETH_CARD_TEXT(card, 5, "write");
1204	} else {
1205		channel = &card->data;
1206		QETH_CARD_TEXT(card, 5, "data");
1207	}
1208
1209	if (intparm == 0) {
1210		QETH_CARD_TEXT(card, 5, "irqunsol");
1211	} else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
1212		QETH_CARD_TEXT(card, 5, "irqunexp");
1213
1214		dev_err(&cdev->dev,
1215			"Received IRQ with intparm %lx, expected %px\n",
1216			intparm, channel->active_cmd);
1217		if (channel->active_cmd)
1218			qeth_cancel_cmd(channel->active_cmd, -EIO);
1219	} else {
1220		iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
1221	}
1222
1223	qeth_unlock_channel(card, channel);
1224
1225	rc = qeth_check_irb_error(card, cdev, irb);
1226	if (rc) {
1227		/* IO was terminated, free its resources. */
1228		if (iob)
1229			qeth_cancel_cmd(iob, rc);
1230		return;
1231	}
1232
1233	if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
1234		channel->state = CH_STATE_STOPPED;
1235		wake_up(&card->wait_q);
1236	}
1237
1238	if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) {
1239		channel->state = CH_STATE_HALTED;
1240		wake_up(&card->wait_q);
1241	}
1242
1243	if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
1244					  SCSW_FCTL_HALT_FUNC))) {
1245		qeth_cancel_cmd(iob, -ECANCELED);
1246		iob = NULL;
1247	}
1248
1249	cstat = irb->scsw.cmd.cstat;
1250	dstat = irb->scsw.cmd.dstat;
1251
1252	if ((dstat & DEV_STAT_UNIT_EXCEP) ||
1253	    (dstat & DEV_STAT_UNIT_CHECK) ||
1254	    (cstat)) {
1255		if (irb->esw.esw0.erw.cons) {
1256			dev_warn(&channel->ccwdev->dev,
1257				"The qeth device driver failed to recover "
1258				"an error on the device\n");
1259			QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
1260					 CCW_DEVID(channel->ccwdev), cstat,
1261					 dstat);
1262			print_hex_dump(KERN_WARNING, "qeth: irb ",
1263				DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
1264			print_hex_dump(KERN_WARNING, "qeth: sense data ",
1265				DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
1266		}
1267
1268		rc = qeth_get_problem(card, cdev, irb);
1269		if (rc) {
1270			card->read_or_write_problem = 1;
1271			if (iob)
1272				qeth_cancel_cmd(iob, rc);
1273			qeth_clear_ipacmd_list(card);
1274			qeth_schedule_recovery(card);
1275			return;
1276		}
1277	}
1278
1279	if (iob) {
1280		/* sanity check: */
1281		if (irb->scsw.cmd.count > iob->length) {
1282			qeth_cancel_cmd(iob, -EIO);
1283			return;
1284		}
1285		if (iob->callback)
1286			iob->callback(card, iob,
1287				      iob->length - irb->scsw.cmd.count);
1288	}
1289}
1290
1291static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1292		struct qeth_qdio_out_buffer *buf,
1293		enum iucv_tx_notify notification)
1294{
1295	struct sk_buff *skb;
1296
1297	skb_queue_walk(&buf->skb_list, skb) {
1298		struct sock *sk = skb->sk;
1299
1300		QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
1301		QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
1302		if (sk && sk->sk_family == PF_IUCV)
1303			iucv_sk(sk)->sk_txnotify(sk, notification);
1304	}
1305}
1306
1307static void qeth_tx_complete_buf(struct qeth_qdio_out_q *queue,
1308				 struct qeth_qdio_out_buffer *buf, bool error,
1309				 int budget)
1310{
1311	struct sk_buff *skb;
1312
1313	/* Empty buffer? */
1314	if (buf->next_element_to_fill == 0)
1315		return;
1316
1317	QETH_TXQ_STAT_INC(queue, bufs);
1318	QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
1319	if (error) {
1320		QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames);
1321	} else {
1322		QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames);
1323		QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes);
1324	}
1325
1326	while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
1327		unsigned int bytes = qdisc_pkt_len(skb);
1328		bool is_tso = skb_is_gso(skb);
1329		unsigned int packets;
1330
1331		packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
1332		if (!error) {
1333			if (skb->ip_summed == CHECKSUM_PARTIAL)
1334				QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
1335			if (skb_is_nonlinear(skb))
1336				QETH_TXQ_STAT_INC(queue, skbs_sg);
1337			if (is_tso) {
1338				QETH_TXQ_STAT_INC(queue, skbs_tso);
1339				QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
1340			}
1341		}
1342
1343		napi_consume_skb(skb, budget);
1344	}
1345}
1346
1347static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
1348				     struct qeth_qdio_out_buffer *buf,
1349				     bool error, int budget)
1350{
1351	int i;
1352
1353	/* is PCI flag set on buffer? */
1354	if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) {
1355		atomic_dec(&queue->set_pci_flags_count);
1356		QETH_TXQ_STAT_INC(queue, completion_irq);
1357	}
1358
1359	qeth_tx_complete_buf(queue, buf, error, budget);
1360
1361	for (i = 0; i < queue->max_elements; ++i) {
1362		void *data = phys_to_virt(buf->buffer->element[i].addr);
1363
1364		if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
1365			kmem_cache_free(qeth_core_header_cache, data);
1366	}
1367
1368	qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
1369	buf->next_element_to_fill = 0;
1370	buf->frames = 0;
1371	buf->bytes = 0;
1372	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
1373}
1374
1375static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
1376{
1377	if (buf->aob)
1378		kmem_cache_free(qeth_qaob_cache, buf->aob);
1379	kmem_cache_free(qeth_qdio_outbuf_cache, buf);
1380}
1381
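/* Complete TX buffers that are waiting for an asynchronous QAOB
 * completion. With @drain set (e.g. when tearing down the queue), pending
 * buffers are completed unconditionally and flagged as failed.
 */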
1382static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
1383					  struct qeth_qdio_out_q *queue,
1384					  bool drain, int budget)
1385{
1386	struct qeth_qdio_out_buffer *buf, *tmp;
1387
1388	list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) {
1389		struct qeth_qaob_priv1 *priv;
1390		struct qaob *aob = buf->aob;
1391		enum iucv_tx_notify notify;
1392		unsigned int i;
1393
1394		priv = (struct qeth_qaob_priv1 *)&aob->user1;
1395		if (drain || READ_ONCE(priv->state) == QETH_QAOB_DONE) {
1396			QETH_CARD_TEXT(card, 5, "fp");
1397			QETH_CARD_TEXT_(card, 5, "%lx", (long) buf);
1398
1399			notify = drain ? TX_NOTIFY_GENERALERROR :
1400					 qeth_compute_cq_notification(aob->aorc, 1);
1401			qeth_notify_skbs(queue, buf, notify);
1402			qeth_tx_complete_buf(queue, buf, drain, budget);
1403
1404			for (i = 0;
1405			     i < aob->sb_count && i < queue->max_elements;
1406			     i++) {
1407				void *data = phys_to_virt(aob->sba[i]);
1408
1409				if (test_bit(i, buf->from_kmem_cache) && data)
1410					kmem_cache_free(qeth_core_header_cache,
1411							data);
1412			}
1413
1414			list_del(&buf->list_entry);
1415			qeth_free_out_buf(buf);
1416		}
1417	}
1418}
1419
1420static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
1421{
1422	int j;
1423
1424	qeth_tx_complete_pending_bufs(q->card, q, true, 0);
1425
1426	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1427		if (!q->bufs[j])
1428			continue;
1429
1430		qeth_clear_output_buffer(q, q->bufs[j], true, 0);
1431		if (free) {
1432			qeth_free_out_buf(q->bufs[j]);
1433			q->bufs[j] = NULL;
1434		}
1435	}
1436}
1437
1438static void qeth_drain_output_queues(struct qeth_card *card)
1439{
1440	int i;
1441
1442	QETH_CARD_TEXT(card, 2, "clearqdbf");
1443	/* clear outbound buffers to free skbs */
1444	for (i = 0; i < card->qdio.no_out_queues; ++i) {
1445		if (card->qdio.out_qs[i])
1446			qeth_drain_output_queue(card->qdio.out_qs[i], false);
1447	}
1448}
1449
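/* Switch an OSA device between a single TX queue and the netdev's full
 * queue count, freeing the current QDIO queues so that they get
 * re-allocated with the new count.
 */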
1450static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
1451{
1452	unsigned int max = single ? 1 : card->dev->num_tx_queues;
1453
1454	if (card->qdio.no_out_queues == max)
1455		return;
1456
1457	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
1458		qeth_free_qdio_queues(card);
1459
1460	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
1461		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
1462
1463	card->qdio.no_out_queues = max;
1464}
1465
1466static int qeth_update_from_chp_desc(struct qeth_card *card)
1467{
1468	struct ccw_device *ccwdev;
1469	struct channel_path_desc_fmt0 *chp_dsc;
1470
1471	QETH_CARD_TEXT(card, 2, "chp_desc");
1472
1473	ccwdev = card->data.ccwdev;
1474	chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
1475	if (!chp_dsc)
1476		return -ENOMEM;
1477
1478	card->info.func_level = 0x4100 + chp_dsc->desc;
1479
1480	if (IS_OSD(card) || IS_OSX(card))
1481		/* CHPP field bit 6 == 1 -> single queue */
1482		qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
1483
1484	kfree(chp_dsc);
1485	QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
1486	QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
1487	return 0;
1488}
1489
1490static void qeth_init_qdio_info(struct qeth_card *card)
1491{
1492	QETH_CARD_TEXT(card, 4, "intqdinf");
1493	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1494	card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1495	card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1496
1497	/* inbound */
1498	card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1499	if (IS_IQD(card))
1500		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1501	else
1502		card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1503	card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1504	INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1505	INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1506}
1507
1508static void qeth_set_initial_options(struct qeth_card *card)
1509{
1510	card->options.route4.type = NO_ROUTER;
1511	card->options.route6.type = NO_ROUTER;
1512	card->options.isolation = ISOLATION_MODE_NONE;
1513	card->options.cq = QETH_CQ_DISABLED;
1514	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
1515}
1516
1517static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
1518{
1519	unsigned long flags;
1520	int rc = 0;
1521
1522	spin_lock_irqsave(&card->thread_mask_lock, flags);
1523	QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
1524			(u8) card->thread_start_mask,
1525			(u8) card->thread_allowed_mask,
1526			(u8) card->thread_running_mask);
1527	rc = (card->thread_start_mask & thread);
1528	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
1529	return rc;
1530}
1531
1532static int qeth_do_reset(void *data);
1533static void qeth_start_kernel_thread(struct work_struct *work)
1534{
1535	struct task_struct *ts;
1536	struct qeth_card *card = container_of(work, struct qeth_card,
1537					kernel_thread_starter);
1538	QETH_CARD_TEXT(card, 2, "strthrd");
1539
1540	if (card->read.state != CH_STATE_UP &&
1541	    card->write.state != CH_STATE_UP)
1542		return;
1543	if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
1544		ts = kthread_run(qeth_do_reset, card, "qeth_recover");
1545		if (IS_ERR(ts)) {
1546			qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
1547			qeth_clear_thread_running_bit(card,
1548				QETH_RECOVER_THREAD);
1549		}
1550	}
1551}
1552
1553static void qeth_buffer_reclaim_work(struct work_struct *);
1554static void qeth_setup_card(struct qeth_card *card)
1555{
1556	QETH_CARD_TEXT(card, 2, "setupcrd");
1557
1558	card->info.type = CARD_RDEV(card)->id.driver_info;
1559	card->state = CARD_STATE_DOWN;
1560	spin_lock_init(&card->lock);
1561	spin_lock_init(&card->thread_mask_lock);
1562	mutex_init(&card->conf_mutex);
1563	mutex_init(&card->discipline_mutex);
1564	INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
1565	INIT_LIST_HEAD(&card->cmd_waiter_list);
1566	init_waitqueue_head(&card->wait_q);
1567	qeth_set_initial_options(card);
1568	/* IP address takeover */
1569	INIT_LIST_HEAD(&card->ipato.entries);
1570	qeth_init_qdio_info(card);
1571	INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1572	hash_init(card->rx_mode_addrs);
1573	hash_init(card->local_addrs4);
1574	hash_init(card->local_addrs6);
1575	spin_lock_init(&card->local_addrs4_lock);
1576	spin_lock_init(&card->local_addrs6_lock);
1577}
1578
1579static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
1580{
1581	struct qeth_card *card = container_of(slr, struct qeth_card,
1582					qeth_service_level);
1583	if (card->info.mcl_level[0])
1584		seq_printf(m, "qeth: %s firmware level %s\n",
1585			CARD_BUS_ID(card), card->info.mcl_level);
1586}
1587
1588static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
1589{
1590	struct qeth_card *card;
1591
1592	QETH_DBF_TEXT(SETUP, 2, "alloccrd");
1593	card = kzalloc(sizeof(*card), GFP_KERNEL);
1594	if (!card)
1595		goto out;
1596	QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
1597
1598	card->gdev = gdev;
1599	dev_set_drvdata(&gdev->dev, card);
1600	CARD_RDEV(card) = gdev->cdev[0];
1601	CARD_WDEV(card) = gdev->cdev[1];
1602	CARD_DDEV(card) = gdev->cdev[2];
1603
1604	card->event_wq = alloc_ordered_workqueue("%s_event", 0,
1605						 dev_name(&gdev->dev));
1606	if (!card->event_wq)
1607		goto out_wq;
1608
1609	card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
1610	if (!card->read_cmd)
1611		goto out_read_cmd;
1612
1613	card->debugfs = debugfs_create_dir(dev_name(&gdev->dev),
1614					   qeth_debugfs_root);
1615	debugfs_create_file("local_addrs", 0400, card->debugfs, card,
1616			    &qeth_debugfs_local_addr_fops);
1617
1618	card->qeth_service_level.seq_print = qeth_core_sl_print;
1619	register_service_level(&card->qeth_service_level);
1620	return card;
1621
1622out_read_cmd:
1623	destroy_workqueue(card->event_wq);
1624out_wq:
1625	dev_set_drvdata(&gdev->dev, NULL);
1626	kfree(card);
1627out:
1628	return NULL;
1629}
1630
1631static int qeth_clear_channel(struct qeth_card *card,
1632			      struct qeth_channel *channel)
1633{
1634	int rc;
1635
1636	QETH_CARD_TEXT(card, 3, "clearch");
1637	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1638	rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
1639	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1640
1641	if (rc)
1642		return rc;
1643	rc = wait_event_interruptible_timeout(card->wait_q,
1644			channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
1645	if (rc == -ERESTARTSYS)
1646		return rc;
1647	if (channel->state != CH_STATE_STOPPED)
1648		return -ETIME;
1649	channel->state = CH_STATE_DOWN;
1650	return 0;
1651}
1652
1653static int qeth_halt_channel(struct qeth_card *card,
1654			     struct qeth_channel *channel)
1655{
1656	int rc;
1657
1658	QETH_CARD_TEXT(card, 3, "haltch");
1659	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1660	rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
1661	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1662
1663	if (rc)
1664		return rc;
1665	rc = wait_event_interruptible_timeout(card->wait_q,
1666			channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
1667	if (rc == -ERESTARTSYS)
1668		return rc;
1669	if (channel->state != CH_STATE_HALTED)
1670		return -ETIME;
1671	return 0;
1672}
1673
1674static int qeth_stop_channel(struct qeth_channel *channel)
1675{
1676	struct ccw_device *cdev = channel->ccwdev;
1677	int rc;
1678
1679	rc = ccw_device_set_offline(cdev);
1680
1681	spin_lock_irq(get_ccwdev_lock(cdev));
1682	if (channel->active_cmd)
1683		dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
1684			channel->active_cmd);
1685
1686	cdev->handler = NULL;
1687	spin_unlock_irq(get_ccwdev_lock(cdev));
1688
1689	return rc;
1690}
1691
1692static int qeth_start_channel(struct qeth_channel *channel)
1693{
1694	struct ccw_device *cdev = channel->ccwdev;
1695	int rc;
1696
1697	channel->state = CH_STATE_DOWN;
1698	xchg(&channel->active_cmd, NULL);
1699
1700	spin_lock_irq(get_ccwdev_lock(cdev));
1701	cdev->handler = qeth_irq;
1702	spin_unlock_irq(get_ccwdev_lock(cdev));
1703
1704	rc = ccw_device_set_online(cdev);
1705	if (rc)
1706		goto err;
1707
1708	return 0;
1709
1710err:
1711	spin_lock_irq(get_ccwdev_lock(cdev));
1712	cdev->handler = NULL;
1713	spin_unlock_irq(get_ccwdev_lock(cdev));
1714	return rc;
1715}
1716
1717static int qeth_halt_channels(struct qeth_card *card)
1718{
1719	int rc1 = 0, rc2 = 0, rc3 = 0;
1720
1721	QETH_CARD_TEXT(card, 3, "haltchs");
1722	rc1 = qeth_halt_channel(card, &card->read);
1723	rc2 = qeth_halt_channel(card, &card->write);
1724	rc3 = qeth_halt_channel(card, &card->data);
1725	if (rc1)
1726		return rc1;
1727	if (rc2)
1728		return rc2;
1729	return rc3;
1730}
1731
1732static int qeth_clear_channels(struct qeth_card *card)
1733{
1734	int rc1 = 0, rc2 = 0, rc3 = 0;
1735
1736	QETH_CARD_TEXT(card, 3, "clearchs");
1737	rc1 = qeth_clear_channel(card, &card->read);
1738	rc2 = qeth_clear_channel(card, &card->write);
1739	rc3 = qeth_clear_channel(card, &card->data);
1740	if (rc1)
1741		return rc1;
1742	if (rc2)
1743		return rc2;
1744	return rc3;
1745}
1746
1747static int qeth_clear_halt_card(struct qeth_card *card, int halt)
1748{
1749	int rc = 0;
1750
1751	QETH_CARD_TEXT(card, 3, "clhacrd");
1752
1753	if (halt)
1754		rc = qeth_halt_channels(card);
1755	if (rc)
1756		return rc;
1757	return qeth_clear_channels(card);
1758}
1759
1760static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
1761{
1762	int rc = 0;
1763
1764	QETH_CARD_TEXT(card, 3, "qdioclr");
1765	switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
1766		QETH_QDIO_CLEANING)) {
1767	case QETH_QDIO_ESTABLISHED:
1768		if (IS_IQD(card))
1769			rc = qdio_shutdown(CARD_DDEV(card),
1770				QDIO_FLAG_CLEANUP_USING_HALT);
1771		else
1772			rc = qdio_shutdown(CARD_DDEV(card),
1773				QDIO_FLAG_CLEANUP_USING_CLEAR);
1774		if (rc)
1775			QETH_CARD_TEXT_(card, 3, "1err%d", rc);
1776		atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
1777		break;
1778	case QETH_QDIO_CLEANING:
1779		return rc;
1780	default:
1781		break;
1782	}
1783	rc = qeth_clear_halt_card(card, use_halt);
1784	if (rc)
1785		QETH_CARD_TEXT_(card, 3, "2err%d", rc);
1786	return rc;
1787}
1788
1789static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
1790{
1791	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1792	struct diag26c_vnic_resp *response = NULL;
1793	struct diag26c_vnic_req *request = NULL;
1794	struct ccw_dev_id id;
1795	char userid[80];
1796	int rc = 0;
1797
1798	QETH_CARD_TEXT(card, 2, "vmlayer");
1799
1800	cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
1801	if (rc)
1802		goto out;
1803
1804	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
1805	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
1806	if (!request || !response) {
1807		rc = -ENOMEM;
1808		goto out;
1809	}
1810
1811	ccw_device_get_id(CARD_RDEV(card), &id);
1812	request->resp_buf_len = sizeof(*response);
1813	request->resp_version = DIAG26C_VERSION6_VM65918;
1814	request->req_format = DIAG26C_VNIC_INFO;
1815	ASCEBC(userid, 8);
1816	memcpy(&request->sys_name, userid, 8);
1817	request->devno = id.devno;
1818
1819	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1820	rc = diag26c(request, response, DIAG26C_PORT_VNIC);
1821	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
1822	if (rc)
1823		goto out;
1824	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
1825
1826	if (request->resp_buf_len < sizeof(*response) ||
1827	    response->version != request->resp_version) {
1828		rc = -EIO;
1829		goto out;
1830	}
1831
1832	if (response->protocol == VNIC_INFO_PROT_L2)
1833		disc = QETH_DISCIPLINE_LAYER2;
1834	else if (response->protocol == VNIC_INFO_PROT_L3)
1835		disc = QETH_DISCIPLINE_LAYER3;
1836
1837out:
1838	kfree(response);
1839	kfree(request);
1840	if (rc)
1841		QETH_CARD_TEXT_(card, 2, "err%x", rc);
1842	return disc;
1843}
1844
1845/* Determine whether the device requires a specific layer discipline */
1846static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1847{
1848	enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1849
1850	if (IS_OSM(card))
1851		disc = QETH_DISCIPLINE_LAYER2;
1852	else if (IS_VM_NIC(card))
1853		disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1854				      qeth_vm_detect_layer(card);
1855
1856	switch (disc) {
1857	case QETH_DISCIPLINE_LAYER2:
1858		QETH_CARD_TEXT(card, 3, "force l2");
1859		break;
1860	case QETH_DISCIPLINE_LAYER3:
1861		QETH_CARD_TEXT(card, 3, "force l3");
1862		break;
1863	default:
1864		QETH_CARD_TEXT(card, 3, "force no");
1865	}
1866
1867	return disc;
1868}
1869
1870static void qeth_set_blkt_defaults(struct qeth_card *card)
1871{
1872	QETH_CARD_TEXT(card, 2, "cfgblkt");
1873
1874	if (card->info.use_v1_blkt) {
1875		card->info.blkt.time_total = 0;
1876		card->info.blkt.inter_packet = 0;
1877		card->info.blkt.inter_packet_jumbo = 0;
1878	} else {
1879		card->info.blkt.time_total = 250;
1880		card->info.blkt.inter_packet = 5;
1881		card->info.blkt.inter_packet_jumbo = 15;
1882	}
1883}
1884
1885static void qeth_idx_init(struct qeth_card *card)
1886{
1887	memset(&card->seqno, 0, sizeof(card->seqno));
1888
1889	card->token.issuer_rm_w = 0x00010103UL;
1890	card->token.cm_filter_w = 0x00010108UL;
1891	card->token.cm_connection_w = 0x0001010aUL;
1892	card->token.ulp_filter_w = 0x0001010bUL;
1893	card->token.ulp_connection_w = 0x0001010dUL;
1894
1895	switch (card->info.type) {
1896	case QETH_CARD_TYPE_IQD:
 1897		card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1898		break;
1899	case QETH_CARD_TYPE_OSD:
1900		card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1901		break;
1902	default:
1903		break;
1904	}
1905}
1906
1907static void qeth_idx_finalize_cmd(struct qeth_card *card,
1908				  struct qeth_cmd_buffer *iob)
1909{
1910	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
1911	       QETH_SEQ_NO_LENGTH);
1912	if (iob->channel == &card->write)
1913		card->seqno.trans_hdr++;
1914}
1915
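/* Derive the function level that the peer is expected to report, based on
 * our own local function level.
 */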
1916static int qeth_peer_func_level(int level)
1917{
1918	if ((level & 0xff) == 8)
1919		return (level & 0xff) + 0x400;
1920	if (((level >> 8) & 3) == 1)
1921		return (level & 0xff) + 0x200;
1922	return level;
1923}
1924
1925static void qeth_mpc_finalize_cmd(struct qeth_card *card,
1926				  struct qeth_cmd_buffer *iob)
1927{
1928	qeth_idx_finalize_cmd(card, iob);
1929
1930	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
1931	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
1932	card->seqno.pdu_hdr++;
1933	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
1934	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);
1935
1936	iob->callback = qeth_release_buffer_cb;
1937}
1938
1939static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob,
1940				 struct qeth_cmd_buffer *reply)
1941{
1942	/* MPC cmds are issued strictly in sequence. */
1943	return !IS_IPA(reply->data);
1944}
1945
1946static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
1947						  const void *data,
1948						  unsigned int data_length)
1949{
1950	struct qeth_cmd_buffer *iob;
1951
1952	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
1953	if (!iob)
1954		return NULL;
1955
1956	memcpy(iob->data, data, data_length);
1957	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
1958		       iob->data);
1959	iob->finalize = qeth_mpc_finalize_cmd;
1960	iob->match = qeth_mpc_match_reply;
1961	return iob;
1962}
1963
1964/**
1965 * qeth_send_control_data() -	send control command to the card
1966 * @card:			qeth_card structure pointer
1967 * @iob:			qeth_cmd_buffer pointer
1968 * @reply_cb:			callback function pointer
1969 *  cb_card:			pointer to the qeth_card structure
1970 *  cb_reply:			pointer to the qeth_reply structure
1971 *  cb_cmd:			pointer to the original iob for non-IPA
1972 *				commands, or to the qeth_ipa_cmd structure
1973 *				for the IPA commands.
1974 * @reply_param:		private pointer passed to the callback
1975 *
1976 * Callback function gets called one or more times, with cb_cmd
1977 * pointing to the response returned by the hardware. Callback
1978 * function must return
1979 *   > 0 if more reply blocks are expected,
1980 *     0 if the last or only reply block is received, and
1981 *   < 0 on error.
1982 * Callback function can get the value of the reply_param pointer from the
1983 * field 'param' of the structure qeth_reply.
1984 */
1985
1986static int qeth_send_control_data(struct qeth_card *card,
1987				  struct qeth_cmd_buffer *iob,
1988				  int (*reply_cb)(struct qeth_card *cb_card,
1989						  struct qeth_reply *cb_reply,
1990						  unsigned long cb_cmd),
1991				  void *reply_param)
1992{
1993	struct qeth_channel *channel = iob->channel;
1994	struct qeth_reply *reply = &iob->reply;
1995	long timeout = iob->timeout;
1996	int rc;
1997
1998	QETH_CARD_TEXT(card, 2, "sendctl");
1999
2000	reply->callback = reply_cb;
2001	reply->param = reply_param;
2002
2003	timeout = wait_event_interruptible_timeout(card->wait_q,
2004						   qeth_trylock_channel(channel, iob),
2005						   timeout);
2006	if (timeout <= 0) {
2007		qeth_put_cmd(iob);
2008		return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2009	}
2010
2011	if (iob->finalize)
2012		iob->finalize(card, iob);
2013	QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
2014
2015	qeth_enqueue_cmd(card, iob);
2016
2017	/* This pairs with iob->callback, and keeps the iob alive after IO: */
2018	qeth_get_cmd(iob);
2019
2020	QETH_CARD_TEXT(card, 6, "noirqpnd");
2021	spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
2022	rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
2023				      (addr_t) iob, 0, 0, timeout);
2024	spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
2025	if (rc) {
2026		QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
2027				 CARD_DEVID(card), rc);
2028		QETH_CARD_TEXT_(card, 2, " err%d", rc);
2029		qeth_dequeue_cmd(card, iob);
2030		qeth_put_cmd(iob);
2031		qeth_unlock_channel(card, channel);
2032		goto out;
2033	}
2034
2035	timeout = wait_for_completion_interruptible_timeout(&iob->done,
2036							    timeout);
2037	if (timeout <= 0)
2038		rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
2039
2040	qeth_dequeue_cmd(card, iob);
2041
2042	if (reply_cb) {
2043		/* Wait until the callback for a late reply has completed: */
2044		spin_lock_irq(&iob->lock);
2045		if (rc)
2046			/* Zap any callback that's still pending: */
2047			iob->rc = rc;
2048		spin_unlock_irq(&iob->lock);
2049	}
2050
2051	if (!rc)
2052		rc = iob->rc;
2053
2054out:
2055	qeth_put_cmd(iob);
2056	return rc;
2057}
2058
2059struct qeth_node_desc {
2060	struct node_descriptor nd1;
2061	struct node_descriptor nd2;
2062	struct node_descriptor nd3;
2063};
2064
2065static void qeth_read_conf_data_cb(struct qeth_card *card,
2066				   struct qeth_cmd_buffer *iob,
2067				   unsigned int data_length)
2068{
2069	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
2070	int rc = 0;
2071	u8 *tag;
2072
2073	QETH_CARD_TEXT(card, 2, "cfgunit");
2074
2075	if (data_length < sizeof(*nd)) {
2076		rc = -EINVAL;
2077		goto out;
2078	}
2079
2080	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
2081			       nd->nd1.plant[1] == _ascebc['M'];
2082	tag = (u8 *)&nd->nd1.tag;
2083	card->info.chpid = tag[0];
2084	card->info.unit_addr2 = tag[1];
2085
2086	tag = (u8 *)&nd->nd2.tag;
2087	card->info.cula = tag[1];
2088
2089	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
2090				 nd->nd3.model[1] == 0xF0 &&
2091				 nd->nd3.model[2] >= 0xF1 &&
2092				 nd->nd3.model[2] <= 0xF4;
2093
2094out:
2095	qeth_notify_cmd(iob, rc);
2096	qeth_put_cmd(iob);
2097}
2098
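/* Fetch the device's node descriptors via a Read Configuration Data
 * channel command, as advertised by the RCD CIW in the extended SenseID
 * data.
 */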
2099static int qeth_read_conf_data(struct qeth_card *card)
2100{
2101	struct qeth_channel *channel = &card->data;
2102	struct qeth_cmd_buffer *iob;
2103	struct ciw *ciw;
2104
2105	/* scan for RCD command in extended SenseID data */
2106	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
2107	if (!ciw || ciw->cmd == 0)
2108		return -EOPNOTSUPP;
2109	if (ciw->count < sizeof(struct qeth_node_desc))
2110		return -EINVAL;
2111
2112	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
2113	if (!iob)
2114		return -ENOMEM;
2115
2116	iob->callback = qeth_read_conf_data_cb;
2117	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
2118		       iob->data);
2119
2120	return qeth_send_control_data(card, iob, NULL, NULL);
2121}
2122
2123static int qeth_idx_check_activate_response(struct qeth_card *card,
2124					    struct qeth_channel *channel,
2125					    struct qeth_cmd_buffer *iob)
2126{
2127	int rc;
2128
2129	rc = qeth_check_idx_response(card, iob->data);
2130	if (rc)
2131		return rc;
2132
2133	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
2134		return 0;
2135
2136	/* negative reply: */
2137	QETH_CARD_TEXT_(card, 2, "idxneg%c",
2138			QETH_IDX_ACT_CAUSE_CODE(iob->data));
2139
2140	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
2141	case QETH_IDX_ACT_ERR_EXCL:
2142		dev_err(&channel->ccwdev->dev,
2143			"The adapter is used exclusively by another host\n");
2144		return -EBUSY;
2145	case QETH_IDX_ACT_ERR_AUTH:
2146	case QETH_IDX_ACT_ERR_AUTH_USER:
2147		dev_err(&channel->ccwdev->dev,
2148			"Setting the device online failed because of insufficient authorization\n");
2149		return -EPERM;
2150	default:
2151		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
2152				 CCW_DEVID(channel->ccwdev));
2153		return -EIO;
2154	}
2155}
2156
2157static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
2158					      struct qeth_cmd_buffer *iob,
2159					      unsigned int data_length)
2160{
2161	struct qeth_channel *channel = iob->channel;
2162	u16 peer_level;
2163	int rc;
2164
2165	QETH_CARD_TEXT(card, 2, "idxrdcb");
2166
2167	rc = qeth_idx_check_activate_response(card, channel, iob);
2168	if (rc)
2169		goto out;
2170
2171	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2172	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
2173		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2174				 CCW_DEVID(channel->ccwdev),
2175				 card->info.func_level, peer_level);
2176		rc = -EINVAL;
2177		goto out;
2178	}
2179
2180	memcpy(&card->token.issuer_rm_r,
2181	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2182	       QETH_MPC_TOKEN_LENGTH);
2183	memcpy(&card->info.mcl_level[0],
2184	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);
2185
2186out:
2187	qeth_notify_cmd(iob, rc);
2188	qeth_put_cmd(iob);
2189}
2190
2191static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
2192					       struct qeth_cmd_buffer *iob,
2193					       unsigned int data_length)
2194{
2195	struct qeth_channel *channel = iob->channel;
2196	u16 peer_level;
2197	int rc;
2198
2199	QETH_CARD_TEXT(card, 2, "idxwrcb");
2200
2201	rc = qeth_idx_check_activate_response(card, channel, iob);
2202	if (rc)
2203		goto out;
2204
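	/* The write channel's reply may carry an extra 0x0100 bit in the
	 * peer's function level; mask it out before comparing:
	 */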
2205	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
2206	if ((peer_level & ~0x0100) !=
2207	    qeth_peer_func_level(card->info.func_level)) {
2208		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
2209				 CCW_DEVID(channel->ccwdev),
2210				 card->info.func_level, peer_level);
2211		rc = -EINVAL;
2212	}
2213
2214out:
2215	qeth_notify_cmd(iob, rc);
2216	qeth_put_cmd(iob);
2217}
2218
2219static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
2220					struct qeth_cmd_buffer *iob)
2221{
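	/* The QDIO device's real address is composed of CULA (high byte)
	 * and unit address 2 (low byte): */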
2222	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
2223	u8 port = ((u8)card->dev->dev_port) | 0x80;
2224	struct ccw1 *ccw = __ccw_from_cmd(iob);
2225
2226	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
2227		       iob->data);
2228	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
2229	iob->finalize = qeth_idx_finalize_cmd;
2230
2231	port |= QETH_IDX_ACT_INVAL_FRAME;
2232	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
2233	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
2234	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
2235	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
2236	       &card->info.func_level, 2);
2237	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
2238	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
2239}
2240
2241static int qeth_idx_activate_read_channel(struct qeth_card *card)
2242{
2243	struct qeth_channel *channel = &card->read;
2244	struct qeth_cmd_buffer *iob;
2245	int rc;
2246
2247	QETH_CARD_TEXT(card, 2, "idxread");
2248
2249	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2250	if (!iob)
2251		return -ENOMEM;
2252
2253	memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2254	qeth_idx_setup_activate_cmd(card, iob);
2255	iob->callback = qeth_idx_activate_read_channel_cb;
2256
2257	rc = qeth_send_control_data(card, iob, NULL, NULL);
2258	if (rc)
2259		return rc;
2260
2261	channel->state = CH_STATE_UP;
2262	return 0;
2263}
2264
2265static int qeth_idx_activate_write_channel(struct qeth_card *card)
2266{
2267	struct qeth_channel *channel = &card->write;
2268	struct qeth_cmd_buffer *iob;
2269	int rc;
2270
2271	QETH_CARD_TEXT(card, 2, "idxwrite");
2272
2273	iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2274	if (!iob)
2275		return -ENOMEM;
2276
2277	memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2278	qeth_idx_setup_activate_cmd(card, iob);
2279	iob->callback = qeth_idx_activate_write_channel_cb;
2280
2281	rc = qeth_send_control_data(card, iob, NULL, NULL);
2282	if (rc)
2283		return rc;
2284
2285	channel->state = CH_STATE_UP;
2286	return 0;
2287}
2288
2289static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2290		unsigned long data)
2291{
2292	struct qeth_cmd_buffer *iob;
2293
2294	QETH_CARD_TEXT(card, 2, "cmenblcb");
2295
2296	iob = (struct qeth_cmd_buffer *) data;
2297	memcpy(&card->token.cm_filter_r,
2298	       QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2299	       QETH_MPC_TOKEN_LENGTH);
2300	return 0;
2301}
2302
2303static int qeth_cm_enable(struct qeth_card *card)
2304{
2305	struct qeth_cmd_buffer *iob;
2306
2307	QETH_CARD_TEXT(card, 2, "cmenable");
2308
2309	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
2310	if (!iob)
2311		return -ENOMEM;
2312
2313	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
2314	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2315	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
2316	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);
2317
2318	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
2319}
2320
2321static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2322		unsigned long data)
2323{
2324	struct qeth_cmd_buffer *iob;
2325
2326	QETH_CARD_TEXT(card, 2, "cmsetpcb");
2327
2328	iob = (struct qeth_cmd_buffer *) data;
2329	memcpy(&card->token.cm_connection_r,
2330	       QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2331	       QETH_MPC_TOKEN_LENGTH);
2332	return 0;
2333}
2334
2335static int qeth_cm_setup(struct qeth_card *card)
2336{
2337	struct qeth_cmd_buffer *iob;
2338
2339	QETH_CARD_TEXT(card, 2, "cmsetup");
2340
2341	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
2342	if (!iob)
2343		return -ENOMEM;
2344
2345	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
2346	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
2347	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
2348	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
2349	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
2350	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
2351	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
2352}
2353
2354static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type)
2355{
2356	if (link_type == QETH_LINK_TYPE_LANE_TR ||
2357	    link_type == QETH_LINK_TYPE_HSTR) {
2358		dev_err(&card->gdev->dev, "Unsupported Token Ring device\n");
2359		return false;
2360	}
2361
2362	return true;
2363}
2364
2365static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
2366{
2367	struct net_device *dev = card->dev;
2368	unsigned int new_mtu;
2369
2370	if (!max_mtu) {
2371		/* IQD needs accurate max MTU to set up its RX buffers: */
2372		if (IS_IQD(card))
2373			return -EINVAL;
2374		/* tolerate quirky HW: */
2375		max_mtu = ETH_MAX_MTU;
2376	}
2377
2378	rtnl_lock();
2379	if (IS_IQD(card)) {
2380		/* move any device with default MTU to new max MTU: */
2381		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;
2382
2383		/* adjust RX buffer size to new max MTU: */
2384		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
2385		if (dev->max_mtu && dev->max_mtu != max_mtu)
2386			qeth_free_qdio_queues(card);
2387	} else {
2388		if (dev->mtu)
2389			new_mtu = dev->mtu;
2390		/* default MTUs for first setup: */
2391		else if (IS_LAYER2(card))
2392			new_mtu = ETH_DATA_LEN;
2393		else
2394			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
2395	}
2396
2397	dev->max_mtu = max_mtu;
2398	dev->mtu = min(new_mtu, max_mtu);
2399	rtnl_unlock();
2400	return 0;
2401}
2402
2403static int qeth_get_mtu_outof_framesize(int framesize)
2404{
2405	switch (framesize) {
2406	case 0x4000:
2407		return 8192;
2408	case 0x6000:
2409		return 16384;
2410	case 0xa000:
2411		return 32768;
2412	case 0xffff:
2413		return 57344;
2414	default:
2415		return 0;
2416	}
2417}
2418
2419static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2420		unsigned long data)
2421{
2422	__u16 mtu, framesize;
2423	__u16 len;
2424	struct qeth_cmd_buffer *iob;
2425	u8 link_type = 0;
2426
2427	QETH_CARD_TEXT(card, 2, "ulpenacb");
2428
2429	iob = (struct qeth_cmd_buffer *) data;
2430	memcpy(&card->token.ulp_filter_r,
2431	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
2432	       QETH_MPC_TOKEN_LENGTH);
2433	if (IS_IQD(card)) {
2434		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
2435		mtu = qeth_get_mtu_outof_framesize(framesize);
2436	} else {
2437		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
2438	}
2439	*(u16 *)reply->param = mtu;
2440
2441	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
2442	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
2443		memcpy(&link_type,
2444		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
2445		if (!qeth_is_supported_link_type(card, link_type))
2446			return -EPROTONOSUPPORT;
2447	}
2448
2449	card->info.link_type = link_type;
2450	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
2451	return 0;
2452}
2453
2454static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2455{
2456	return IS_LAYER2(card) ? QETH_MPC_PROT_L2 : QETH_MPC_PROT_L3;
2457}
2458
2459static int qeth_ulp_enable(struct qeth_card *card)
2460{
2461	u8 prot_type = qeth_mpc_select_prot_type(card);
2462	struct qeth_cmd_buffer *iob;
2463	u16 max_mtu;
2464	int rc;
2465
2466	QETH_CARD_TEXT(card, 2, "ulpenabl");
2467
2468	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
2469	if (!iob)
2470		return -ENOMEM;
2471
2472	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
2473	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
2474	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
2475	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2476	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
2477	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
2478	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
2479	if (rc)
2480		return rc;
2481	return qeth_update_max_mtu(card, max_mtu);
2482}
2483
2484static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2485		unsigned long data)
2486{
2487	struct qeth_cmd_buffer *iob;
2488
2489	QETH_CARD_TEXT(card, 2, "ulpstpcb");
2490
2491	iob = (struct qeth_cmd_buffer *) data;
2492	memcpy(&card->token.ulp_connection_r,
2493	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2494	       QETH_MPC_TOKEN_LENGTH);
2495	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
2496		     3)) {
2497		QETH_CARD_TEXT(card, 2, "olmlimit");
2498		dev_err(&card->gdev->dev, "A connection could not be "
2499			"established because of an OLM limit\n");
2500		return -EMLINK;
2501	}
2502	return 0;
2503}
2504
2505static int qeth_ulp_setup(struct qeth_card *card)
2506{
2507	__u16 temp;
2508	struct qeth_cmd_buffer *iob;
2509
2510	QETH_CARD_TEXT(card, 2, "ulpsetup");
2511
2512	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
2513	if (!iob)
2514		return -ENOMEM;
2515
2516	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
2517	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2518	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
2519	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
2520	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
2521	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
2522
2523	memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
2524	temp = (card->info.cula << 8) + card->info.unit_addr2;
2525	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
2526	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
2527}
2528
2529static int qeth_alloc_out_buf(struct qeth_qdio_out_q *q, unsigned int bidx,
2530			      gfp_t gfp)
2531{
2532	struct qeth_qdio_out_buffer *newbuf;
2533
2534	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, gfp);
2535	if (!newbuf)
2536		return -ENOMEM;
2537
2538	newbuf->buffer = q->qdio_bufs[bidx];
2539	skb_queue_head_init(&newbuf->skb_list);
2540	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2541	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
2542	q->bufs[bidx] = newbuf;
2543	return 0;
2544}
2545
2546static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2547{
2548	if (!q)
2549		return;
2550
2551	qeth_drain_output_queue(q, true);
2552	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2553	kfree(q);
2554}
2555
2556static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2557{
2558	struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2559	unsigned int i;
2560
2561	if (!q)
2562		return NULL;
2563
2564	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q))
2565		goto err_qdio_bufs;
2566
2567	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
2568		if (qeth_alloc_out_buf(q, i, GFP_KERNEL))
2569			goto err_out_bufs;
2570	}
2571
2572	return q;
2573
2574err_out_bufs:
2575	while (i > 0)
2576		qeth_free_out_buf(q->bufs[--i]);
2577	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2578err_qdio_bufs:
2579	kfree(q);
2580	return NULL;
2581}
2582
2583static void qeth_tx_completion_timer(struct timer_list *timer)
2584{
2585	struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2586
2587	napi_schedule(&queue->napi);
2588	QETH_TXQ_STAT_INC(queue, completion_timer);
2589}
2590
2591static int qeth_alloc_qdio_queues(struct qeth_card *card)
2592{
2593	unsigned int i;
2594
2595	QETH_CARD_TEXT(card, 2, "allcqdbf");
2596
2597	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
2598		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2599		return 0;
2600
2601	/* inbound buffer pool */
2602	if (qeth_alloc_buffer_pool(card))
2603		goto out_buffer_pool;
2604
2605	/* outbound */
2606	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2607		struct qeth_qdio_out_q *queue;
2608
2609		queue = qeth_alloc_output_queue();
2610		if (!queue)
2611			goto out_freeoutq;
2612		QETH_CARD_TEXT_(card, 2, "outq %i", i);
2613		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
2614		card->qdio.out_qs[i] = queue;
2615		queue->card = card;
2616		queue->queue_no = i;
2617		INIT_LIST_HEAD(&queue->pending_bufs);
2618		spin_lock_init(&queue->lock);
2619		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
2620		if (IS_IQD(card)) {
2621			queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
2622			queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
2623			queue->rescan_usecs = QETH_TX_TIMER_USECS;
2624		} else {
2625			queue->coalesce_usecs = USEC_PER_SEC;
2626			queue->max_coalesced_frames = 0;
2627			queue->rescan_usecs = 10 * USEC_PER_SEC;
2628		}
2629		queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
2630	}
2631
2632	/* completion */
2633	if (qeth_alloc_cq(card))
2634		goto out_freeoutq;
2635
2636	return 0;
2637
2638out_freeoutq:
2639	while (i > 0) {
2640		qeth_free_output_queue(card->qdio.out_qs[--i]);
2641		card->qdio.out_qs[i] = NULL;
2642	}
2643	qeth_free_buffer_pool(card);
2644out_buffer_pool:
2645	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2646	return -ENOMEM;
2647}
2648
2649static void qeth_free_qdio_queues(struct qeth_card *card)
2650{
2651	int i, j;
2652
2653	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2654		QETH_QDIO_UNINITIALIZED)
2655		return;
2656
2657	qeth_free_cq(card);
2658	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2659		if (card->qdio.in_q->bufs[j].rx_skb) {
2660			consume_skb(card->qdio.in_q->bufs[j].rx_skb);
2661			card->qdio.in_q->bufs[j].rx_skb = NULL;
2662		}
2663	}
2664
2665	/* inbound buffer pool */
2666	qeth_free_buffer_pool(card);
2667	/* free outbound qdio_qs */
2668	for (i = 0; i < card->qdio.no_out_queues; i++) {
2669		qeth_free_output_queue(card->qdio.out_qs[i]);
2670		card->qdio.out_qs[i] = NULL;
2671	}
2672}
2673
2674static void qeth_fill_qib_parms(struct qeth_card *card,
2675				struct qeth_qib_parms *parms)
2676{
2677	struct qeth_qdio_out_q *queue;
2678	unsigned int i;
2679
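	/* Each parameter block is tagged with a four-character eye-catcher
	 * ("PCIT", "BLKT", "PQUE"), converted to EBCDIC for the device:
	 */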
2680	parms->pcit_magic[0] = 'P';
2681	parms->pcit_magic[1] = 'C';
2682	parms->pcit_magic[2] = 'I';
2683	parms->pcit_magic[3] = 'T';
2684	ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
2685	parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
2686	parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
2687	parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
2688
2689	parms->blkt_magic[0] = 'B';
2690	parms->blkt_magic[1] = 'L';
2691	parms->blkt_magic[2] = 'K';
2692	parms->blkt_magic[3] = 'T';
2693	ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
2694	parms->blkt_total = card->info.blkt.time_total;
2695	parms->blkt_inter_packet = card->info.blkt.inter_packet;
2696	parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
2697
2698	/* Prio-queueing implicitly uses the default priorities: */
2699	if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
2700		return;
2701
2702	parms->pque_magic[0] = 'P';
2703	parms->pque_magic[1] = 'Q';
2704	parms->pque_magic[2] = 'U';
2705	parms->pque_magic[3] = 'E';
2706	ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
2707	parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
2708	parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
2709
2710	qeth_for_each_output_queue(card, queue, i)
2711		parms->pque_priority[i] = queue->priority;
2712}
2713
2714static int qeth_qdio_activate(struct qeth_card *card)
2715{
2716	QETH_CARD_TEXT(card, 3, "qdioact");
2717	return qdio_activate(CARD_DDEV(card));
2718}
2719
2720static int qeth_dm_act(struct qeth_card *card)
2721{
2722	struct qeth_cmd_buffer *iob;
2723
2724	QETH_CARD_TEXT(card, 2, "dmact");
2725
2726	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
2727	if (!iob)
2728		return -ENOMEM;
2729
2730	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
2731	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
2732	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
2733	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
2734	return qeth_send_control_data(card, iob, NULL, NULL);
2735}
2736
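/*
 * Run the MPC bring-up sequence: start the READ channel, enable and set up
 * the Connection Manager (CM), enable and set up the Upper-Layer Protocol
 * (ULP), then allocate, establish and activate the QDIO queues before
 * activating data transfer (DM_ACT).
 */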
2737static int qeth_mpc_initialize(struct qeth_card *card)
2738{
2739	int rc;
2740
2741	QETH_CARD_TEXT(card, 2, "mpcinit");
2742
2743	rc = qeth_issue_next_read(card);
2744	if (rc) {
2745		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2746		return rc;
2747	}
2748	rc = qeth_cm_enable(card);
2749	if (rc) {
2750		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
2751		return rc;
2752	}
2753	rc = qeth_cm_setup(card);
2754	if (rc) {
2755		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
2756		return rc;
2757	}
2758	rc = qeth_ulp_enable(card);
2759	if (rc) {
2760		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
2761		return rc;
2762	}
2763	rc = qeth_ulp_setup(card);
2764	if (rc) {
2765		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2766		return rc;
2767	}
2768	rc = qeth_alloc_qdio_queues(card);
2769	if (rc) {
2770		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
2771		return rc;
2772	}
2773	rc = qeth_qdio_establish(card);
2774	if (rc) {
2775		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
2776		qeth_free_qdio_queues(card);
2777		return rc;
2778	}
2779	rc = qeth_qdio_activate(card);
2780	if (rc) {
2781		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
2782		return rc;
2783	}
2784	rc = qeth_dm_act(card);
2785	if (rc) {
2786		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
2787		return rc;
2788	}
2789
2790	return 0;
2791}
2792
2793static void qeth_print_status_message(struct qeth_card *card)
2794{
2795	switch (card->info.type) {
2796	case QETH_CARD_TYPE_OSD:
2797	case QETH_CARD_TYPE_OSM:
2798	case QETH_CARD_TYPE_OSX:
2799		/* VM uses a non-zero first character to indicate a
2800		 * HiperSockets-like reporting of the level;
2801		 * OSA sets the first character to zero.
2802		 */
2803		if (!card->info.mcl_level[0]) {
2804			scnprintf(card->info.mcl_level,
2805				  sizeof(card->info.mcl_level),
2806				  "%02x%02x",
2807				  card->info.mcl_level[2],
2808				  card->info.mcl_level[3]);
2809			break;
2810		}
2811		fallthrough;
2812	case QETH_CARD_TYPE_IQD:
2813		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
2814			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
2815				card->info.mcl_level[0]];
2816			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
2817				card->info.mcl_level[1]];
2818			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
2819				card->info.mcl_level[2]];
2820			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
2821				card->info.mcl_level[3]];
2822			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
2823		}
2824		break;
2825	default:
2826		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
2827	}
2828	dev_info(&card->gdev->dev,
2829		 "Device is a%s card%s%s%s\nwith link type %s.\n",
2830		 qeth_get_cardname(card),
2831		 (card->info.mcl_level[0]) ? " (level: " : "",
2832		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
2833		 (card->info.mcl_level[0]) ? ")" : "",
2834		 qeth_get_cardname_short(card));
2835}
2836
2837static void qeth_initialize_working_pool_list(struct qeth_card *card)
2838{
2839	struct qeth_buffer_pool_entry *entry;
2840
2841	QETH_CARD_TEXT(card, 5, "inwrklst");
2842
2843	list_for_each_entry(entry,
2844			    &card->qdio.init_pool.entry_list, init_list) {
2845		qeth_put_buffer_pool_entry(card, entry);
2846	}
2847}
2848
2849static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2850					struct qeth_card *card)
2851{
2852	struct qeth_buffer_pool_entry *entry;
2853	int i, free;
2854
2855	if (list_empty(&card->qdio.in_buf_pool.entry_list))
2856		return NULL;
2857
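	/* An entry is only free when none of its pages is referenced
	 * elsewhere (page_count() > 1 means an skb still holds the page):
	 */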
2858	list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) {
2859		free = 1;
2860		for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2861			if (page_count(entry->elements[i]) > 1) {
2862				free = 0;
2863				break;
2864			}
2865		}
2866		if (free) {
2867			list_del_init(&entry->list);
2868			return entry;
2869		}
2870	}
2871
2872	/* no free buffer in pool so take first one and swap pages */
2873	entry = list_first_entry(&card->qdio.in_buf_pool.entry_list,
2874				 struct qeth_buffer_pool_entry, list);
2875	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2876		if (page_count(entry->elements[i]) > 1) {
2877			struct page *page = dev_alloc_page();
2878
2879			if (!page)
2880				return NULL;
2881
2882			__free_page(entry->elements[i]);
2883			entry->elements[i] = page;
2884			QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2885		}
2886	}
2887	list_del_init(&entry->list);
2888	return entry;
2889}
2890
2891static int qeth_init_input_buffer(struct qeth_card *card,
2892		struct qeth_qdio_buffer *buf)
2893{
2894	struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry;
2895	int i;
2896
2897	if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2898		buf->rx_skb = netdev_alloc_skb(card->dev,
2899					       ETH_HLEN +
2900					       sizeof(struct ipv6hdr));
2901		if (!buf->rx_skb)
2902			return -ENOMEM;
2903	}
2904
2905	if (!pool_entry) {
2906		pool_entry = qeth_find_free_buffer_pool_entry(card);
2907		if (!pool_entry)
2908			return -ENOBUFS;
2909
2910		buf->pool_entry = pool_entry;
2911	}
2912
2913	/*
2914	 * Since the buffer is accessed only from the input_tasklet,
2915	 * there shouldn't be a need to synchronize; also, since we use
2916	 * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out of
2917	 * buffers.
2918	 */
2919	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2920		buf->buffer->element[i].length = PAGE_SIZE;
2921		buf->buffer->element[i].addr =
2922			page_to_phys(pool_entry->elements[i]);
2923		if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2924			buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2925		else
2926			buf->buffer->element[i].eflags = 0;
2927		buf->buffer->element[i].sflags = 0;
2928	}
2929	return 0;
2930}
2931
2932static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
2933					    struct qeth_qdio_out_q *queue)
2934{
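	/* Bulking multiple frames per doorbell is only done for IQD unicast
	 * queues without a completion queue; the limit then comes from the
	 * SSQD descriptor's mmwc field, with 1 as fallback:
	 */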
2935	if (!IS_IQD(card) ||
2936	    qeth_iqd_is_mcast_queue(card, queue) ||
2937	    card->options.cq == QETH_CQ_ENABLED ||
2938	    qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
2939		return 1;
2940
2941	return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
2942}
2943
2944static int qeth_init_qdio_queues(struct qeth_card *card)
2945{
2946	unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count;
2947	unsigned int i;
2948	int rc;
2949
2950	QETH_CARD_TEXT(card, 2, "initqdqs");
2951
2952	/* inbound queue */
2953	qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2954	memset(&card->rx, 0, sizeof(struct qeth_rx));
2955
2956	qeth_initialize_working_pool_list(card);
2957	/* give only as many buffers to hardware as we have buffer pool entries */
2958	for (i = 0; i < rx_bufs; i++) {
2959		rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2960		if (rc)
2961			return rc;
2962	}
2963
2964	card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs);
2965	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0, 0, rx_bufs);
2966	if (rc) {
2967		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2968		return rc;
2969	}
2970
2971	/* completion */
2972	rc = qeth_cq_init(card);
2973	if (rc) {
2974		return rc;
2975	}
2976
2977	/* outbound queue */
2978	for (i = 0; i < card->qdio.no_out_queues; ++i) {
2979		struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2980
2981		qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2982		queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2983		queue->next_buf_to_fill = 0;
2984		queue->do_pack = 0;
2985		queue->prev_hdr = NULL;
2986		queue->coalesced_frames = 0;
2987		queue->bulk_start = 0;
2988		queue->bulk_count = 0;
2989		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
2990		atomic_set(&queue->used_buffers, 0);
2991		atomic_set(&queue->set_pci_flags_count, 0);
2992		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2993	}
2994	return 0;
2995}
2996
2997static void qeth_ipa_finalize_cmd(struct qeth_card *card,
2998				  struct qeth_cmd_buffer *iob)
2999{
3000	qeth_mpc_finalize_cmd(card, iob);
3001
3002	/* override with IPA-specific values: */
3003	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
3004}
3005
3006static void qeth_prepare_ipa_cmd(struct qeth_card *card,
3007				 struct qeth_cmd_buffer *iob, u16 cmd_length)
3008{
3009	u8 prot_type = qeth_mpc_select_prot_type(card);
3010	u16 total_length = iob->length;
3011
3012	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
3013		       iob->data);
3014	iob->finalize = qeth_ipa_finalize_cmd;
3015
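	/* The IPA PDU header carries the total transfer length once, and the
	 * command length in three separate PDU length fields:
	 */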
3016	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
3017	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
3018	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
3019	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
3020	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
3021	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
3022	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
3023	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
3024}
3025
3026static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob,
3027				 struct qeth_cmd_buffer *reply)
3028{
3029	struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply);
3030
3031	return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno);
3032}
3033
3034struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
3035					   enum qeth_ipa_cmds cmd_code,
3036					   enum qeth_prot_versions prot,
3037					   unsigned int data_length)
3038{
3039	struct qeth_cmd_buffer *iob;
3040	struct qeth_ipacmd_hdr *hdr;
3041
3042	data_length += offsetof(struct qeth_ipa_cmd, data);
3043	iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
3044			     QETH_IPA_TIMEOUT);
3045	if (!iob)
3046		return NULL;
3047
3048	qeth_prepare_ipa_cmd(card, iob, data_length);
3049	iob->match = qeth_ipa_match_reply;
3050
3051	hdr = &__ipa_cmd(iob)->hdr;
3052	hdr->command = cmd_code;
3053	hdr->initiator = IPA_CMD_INITIATOR_HOST;
3054	/* hdr->seqno is set by qeth_send_control_data() */
3055	hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH;
3056	hdr->rel_adapter_no = (u8) card->dev->dev_port;
3057	hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
3058	hdr->param_count = 1;
3059	hdr->prot_version = prot;
3060	return iob;
3061}
3062EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
3063
3064static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
3065				struct qeth_reply *reply, unsigned long data)
3066{
3067	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3068
3069	return (cmd->hdr.return_code) ? -EIO : 0;
3070}
3071
3072/*
3073 * qeth_send_ipa_cmd() - send an IPA command
3074 *
3075 * See qeth_send_control_data() for explanation of the arguments.
3076 */
3077
3078int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
3079		int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
3080			unsigned long),
3081		void *reply_param)
3082{
3083	int rc;
3084
3085	QETH_CARD_TEXT(card, 4, "sendipa");
3086
3087	if (card->read_or_write_problem) {
3088		qeth_put_cmd(iob);
3089		return -EIO;
3090	}
3091
3092	if (reply_cb == NULL)
3093		reply_cb = qeth_send_ipa_cmd_cb;
3094	rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
3095	if (rc == -ETIME) {
3096		qeth_clear_ipacmd_list(card);
3097		qeth_schedule_recovery(card);
3098	}
3099	return rc;
3100}
3101EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
3102
3103static int qeth_send_startlan_cb(struct qeth_card *card,
3104				 struct qeth_reply *reply, unsigned long data)
3105{
3106	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3107
3108	if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
3109		return -ENETDOWN;
3110
3111	return (cmd->hdr.return_code) ? -EIO : 0;
3112}
3113
3114static int qeth_send_startlan(struct qeth_card *card)
3115{
3116	struct qeth_cmd_buffer *iob;
3117
3118	QETH_CARD_TEXT(card, 2, "strtlan");
3119
3120	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
3121	if (!iob)
3122		return -ENOMEM;
3123	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
3124}
3125
3126static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
3127{
3128	if (!cmd->hdr.return_code)
3129		cmd->hdr.return_code =
3130			cmd->data.setadapterparms.hdr.return_code;
3131	return cmd->hdr.return_code;
3132}
3133
3134static int qeth_query_setadapterparms_cb(struct qeth_card *card,
3135		struct qeth_reply *reply, unsigned long data)
3136{
3137	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3138	struct qeth_query_cmds_supp *query_cmd;
3139
3140	QETH_CARD_TEXT(card, 3, "quyadpcb");
3141	if (qeth_setadpparms_inspect_rc(cmd))
3142		return -EIO;
3143
3144	query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp;
3145	if (query_cmd->lan_type & 0x7f) {
3146		if (!qeth_is_supported_link_type(card, query_cmd->lan_type))
3147			return -EPROTONOSUPPORT;
3148
3149		card->info.link_type = query_cmd->lan_type;
3150		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
3151	}
3152
3153	card->options.adp.supported = query_cmd->supported_cmds;
3154	return 0;
3155}
3156
3157static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
3158						    enum qeth_ipa_setadp_cmd adp_cmd,
3159						    unsigned int data_length)
3160{
3161	struct qeth_ipacmd_setadpparms_hdr *hdr;
3162	struct qeth_cmd_buffer *iob;
3163
3164	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
3165				 data_length +
3166				 offsetof(struct qeth_ipacmd_setadpparms,
3167					  data));
3168	if (!iob)
3169		return NULL;
3170
3171	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
3172	hdr->cmdlength = sizeof(*hdr) + data_length;
3173	hdr->command_code = adp_cmd;
3174	hdr->used_total = 1;
3175	hdr->seq_no = 1;
3176	return iob;
3177}
3178
3179static int qeth_query_setadapterparms(struct qeth_card *card)
3180{
3181	int rc;
3182	struct qeth_cmd_buffer *iob;
3183
3184	QETH_CARD_TEXT(card, 3, "queryadp");
3185	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
3186				   SETADP_DATA_SIZEOF(query_cmds_supp));
3187	if (!iob)
3188		return -ENOMEM;
3189	rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
3190	return rc;
3191}
3192
3193static int qeth_query_ipassists_cb(struct qeth_card *card,
3194		struct qeth_reply *reply, unsigned long data)
3195{
3196	struct qeth_ipa_cmd *cmd;
3197
3198	QETH_CARD_TEXT(card, 2, "qipasscb");
3199
3200	cmd = (struct qeth_ipa_cmd *) data;
3201
3202	switch (cmd->hdr.return_code) {
3203	case IPA_RC_SUCCESS:
3204		break;
3205	case IPA_RC_NOTSUPP:
3206	case IPA_RC_L2_UNSUPPORTED_CMD:
3207		QETH_CARD_TEXT(card, 2, "ipaunsup");
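		/* The device doesn't support QIPASSIST; assume that at least
		 * SETADAPTERPARMS is available so setup can continue:
		 */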
3208		card->options.ipa4.supported |= IPA_SETADAPTERPARMS;
3209		card->options.ipa6.supported |= IPA_SETADAPTERPARMS;
3210		return -EOPNOTSUPP;
3211	default:
3212		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
3213				 CARD_DEVID(card), cmd->hdr.return_code);
3214		return -EIO;
3215	}
3216
3217	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
3218		card->options.ipa4 = cmd->hdr.assists;
3219	else if (cmd->hdr.prot_version == QETH_PROT_IPV6)
3220		card->options.ipa6 = cmd->hdr.assists;
3221	else
3222		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
3223				 CARD_DEVID(card));
3224	return 0;
3225}
3226
3227static int qeth_query_ipassists(struct qeth_card *card,
3228				enum qeth_prot_versions prot)
3229{
3230	int rc;
3231	struct qeth_cmd_buffer *iob;
3232
3233	QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
3234	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
3235	if (!iob)
3236		return -ENOMEM;
3237	rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
3238	return rc;
3239}
3240
3241static int qeth_query_switch_attributes_cb(struct qeth_card *card,
3242				struct qeth_reply *reply, unsigned long data)
3243{
3244	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3245	struct qeth_query_switch_attributes *attrs;
3246	struct qeth_switch_info *sw_info;
3247
3248	QETH_CARD_TEXT(card, 2, "qswiatcb");
3249	if (qeth_setadpparms_inspect_rc(cmd))
3250		return -EIO;
3251
3252	sw_info = (struct qeth_switch_info *)reply->param;
3253	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
3254	sw_info->capabilities = attrs->capabilities;
3255	sw_info->settings = attrs->settings;
3256	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
3257			sw_info->settings);
3258	return 0;
3259}
3260
3261int qeth_query_switch_attributes(struct qeth_card *card,
3262				 struct qeth_switch_info *sw_info)
3263{
3264	struct qeth_cmd_buffer *iob;
3265
3266	QETH_CARD_TEXT(card, 2, "qswiattr");
3267	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
3268		return -EOPNOTSUPP;
3269	if (!netif_carrier_ok(card->dev))
3270		return -ENOMEDIUM;
3271	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
3272	if (!iob)
3273		return -ENOMEM;
3274	return qeth_send_ipa_cmd(card, iob,
3275				qeth_query_switch_attributes_cb, sw_info);
3276}
3277
3278struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
3279					  enum qeth_diags_cmds sub_cmd,
3280					  unsigned int data_length)
3281{
3282	struct qeth_ipacmd_diagass *cmd;
3283	struct qeth_cmd_buffer *iob;
3284
3285	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
3286				 DIAG_HDR_LEN + data_length);
3287	if (!iob)
3288		return NULL;
3289
3290	cmd = &__ipa_cmd(iob)->data.diagass;
3291	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
3292	cmd->subcmd = sub_cmd;
3293	return iob;
3294}
3295EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3296
3297static int qeth_query_setdiagass_cb(struct qeth_card *card,
3298		struct qeth_reply *reply, unsigned long data)
3299{
3300	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3301	u16 rc = cmd->hdr.return_code;
3302
3303	if (rc) {
3304		QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3305		return -EIO;
3306	}
3307
3308	card->info.diagass_support = cmd->data.diagass.ext;
3309	return 0;
3310}
3311
3312static int qeth_query_setdiagass(struct qeth_card *card)
3313{
3314	struct qeth_cmd_buffer *iob;
3315
3316	QETH_CARD_TEXT(card, 2, "qdiagass");
3317	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
3318	if (!iob)
3319		return -ENOMEM;
3320	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
3321}
3322
3323static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3324{
3325	unsigned long info = get_zeroed_page(GFP_KERNEL);
3326	struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3327	struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3328	struct ccw_dev_id ccwid;
3329	int level;
3330
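	/* Identify this system to the HW trap: CHPID, subchannel set and
	 * device number, plus LPAR number (STSI 2.2.2) and guest name
	 * (STSI 3.2.2) where the machine provides those levels:
	 */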
3331	tid->chpid = card->info.chpid;
3332	ccw_device_get_id(CARD_RDEV(card), &ccwid);
3333	tid->ssid = ccwid.ssid;
3334	tid->devno = ccwid.devno;
3335	if (!info)
3336		return;
3337	level = stsi(NULL, 0, 0, 0);
3338	if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3339		tid->lparnr = info222->lpar_number;
3340	if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3341		EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3342		memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3343	}
3344	free_page(info);
3345}
3346
3347static int qeth_hw_trap_cb(struct qeth_card *card,
3348		struct qeth_reply *reply, unsigned long data)
3349{
3350	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3351	u16 rc = cmd->hdr.return_code;
3352
3353	if (rc) {
3354		QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3355		return -EIO;
3356	}
3357	return 0;
3358}
3359
3360int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
3361{
3362	struct qeth_cmd_buffer *iob;
3363	struct qeth_ipa_cmd *cmd;
3364
3365	QETH_CARD_TEXT(card, 2, "diagtrap");
3366	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
3367	if (!iob)
3368		return -ENOMEM;
3369	cmd = __ipa_cmd(iob);
3370	cmd->data.diagass.type = 1;
3371	cmd->data.diagass.action = action;
3372	switch (action) {
3373	case QETH_DIAGS_TRAP_ARM:
3374		cmd->data.diagass.options = 0x0003;
3375		cmd->data.diagass.ext = 0x00010000 +
3376			sizeof(struct qeth_trap_id);
3377		qeth_get_trap_id(card,
3378			(struct qeth_trap_id *)cmd->data.diagass.cdata);
3379		break;
3380	case QETH_DIAGS_TRAP_DISARM:
3381		cmd->data.diagass.options = 0x0001;
3382		break;
3383	case QETH_DIAGS_TRAP_CAPTURE:
3384		break;
3385	}
3386	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
3387}
3388
3389static int qeth_check_qdio_errors(struct qeth_card *card,
3390				  struct qdio_buffer *buf,
3391				  unsigned int qdio_error,
3392				  const char *dbftext)
3393{
3394	if (qdio_error) {
3395		QETH_CARD_TEXT(card, 2, dbftext);
3396		QETH_CARD_TEXT_(card, 2, " F15=%02X",
3397			       buf->element[15].sflags);
3398		QETH_CARD_TEXT_(card, 2, " F14=%02X",
3399			       buf->element[14].sflags);
3400		QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3401		if ((buf->element[15].sflags) == 0x12) {
3402			QETH_CARD_STAT_INC(card, rx_fifo_errors);
3403			return 0;
3404		} else
3405			return 1;
3406	}
3407	return 0;
3408}
3409
3410static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
3411					 unsigned int count)
3412{
3413	struct qeth_qdio_q *queue = card->qdio.in_q;
3414	struct list_head *lh;
3415	int i;
3416	int rc;
3417	int newcount = 0;
3418
3419	/* only requeue at a certain threshold to avoid SIGAs */
3420	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
3421		for (i = queue->next_buf_to_init;
3422		     i < queue->next_buf_to_init + count; ++i) {
3423			if (qeth_init_input_buffer(card,
3424				&queue->bufs[QDIO_BUFNR(i)])) {
3425				break;
3426			} else {
3427				newcount++;
3428			}
3429		}
3430
3431		if (newcount < count) {
3432			/* we are short on memory, so we switch back to
3433			   traditional skb allocation and drop packets */
3434			atomic_set(&card->force_alloc_skb, 3);
3435			count = newcount;
3436		} else {
3437			atomic_add_unless(&card->force_alloc_skb, -1, 0);
3438		}
3439
3440		if (!count) {
3441			i = 0;
3442			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
3443				i++;
3444			if (i == card->qdio.in_buf_pool.buf_count) {
3445				QETH_CARD_TEXT(card, 2, "qsarbw");
3446				schedule_delayed_work(
3447					&card->buffer_reclaim_work,
3448					QETH_RECLAIM_WORK_TIME);
3449			}
3450			return 0;
3451		}
3452
3453		rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), 0,
3454						  queue->next_buf_to_init,
3455						  count);
3456		if (rc) {
3457			QETH_CARD_TEXT(card, 2, "qinberr");
3458		}
3459		queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init +
3460						     count);
3461		return count;
3462	}
3463
3464	return 0;
3465}
3466
3467static void qeth_buffer_reclaim_work(struct work_struct *work)
3468{
3469	struct qeth_card *card = container_of(to_delayed_work(work),
3470					      struct qeth_card,
3471					      buffer_reclaim_work);
3472
3473	local_bh_disable();
3474	napi_schedule(&card->napi);
3475	/* kick-start the NAPI softirq: */
3476	local_bh_enable();
3477}
3478
3479static void qeth_handle_send_error(struct qeth_card *card,
3480		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
3481{
3482	int sbalf15 = buffer->buffer->element[15].sflags;
3483
3484	QETH_CARD_TEXT(card, 6, "hdsnderr");
3485	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
3486
3487	if (!qdio_err)
3488		return;
3489
3490	if ((sbalf15 >= 15) && (sbalf15 <= 31))
3491		return;
3492
3493	QETH_CARD_TEXT(card, 1, "lnkfail");
3494	QETH_CARD_TEXT_(card, 1, "%04x %02x",
3495		       (u16)qdio_err, (u8)sbalf15);
3496}
3497
3498/**
3499 * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3500 * @queue: queue to check for packing buffer
3501 *
3502 * Returns number of buffers that were prepared for flush.
3503 */
3504static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3505{
3506	struct qeth_qdio_out_buffer *buffer;
3507
3508	buffer = queue->bufs[queue->next_buf_to_fill];
3509	if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3510	    (buffer->next_element_to_fill > 0)) {
3511		/* it's a packing buffer */
3512		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3513		queue->next_buf_to_fill =
3514			QDIO_BUFNR(queue->next_buf_to_fill + 1);
3515		return 1;
3516	}
3517	return 0;
3518}
3519
3520/*
3521 * Switches to packing mode if the number of used buffers on a queue
3522 * reaches a certain limit.
3523 */
3524static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3525{
3526	if (!queue->do_pack) {
3527		if (atomic_read(&queue->used_buffers)
3528		    >= QETH_HIGH_WATERMARK_PACK){
3529			/* switch non-PACKING -> PACKING */
3530			QETH_CARD_TEXT(queue->card, 6, "np->pack");
3531			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3532			queue->do_pack = 1;
3533		}
3534	}
3535}
3536
3537/*
3538 * Switches from packing to non-packing mode. If there is a packing
3539 * buffer on the queue, it will be prepared to be flushed.
3540 * In that case 1 is returned to inform the caller. If no buffer
3541 * has to be flushed, zero is returned.
3542 */
3543static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3544{
3545	if (queue->do_pack) {
3546		if (atomic_read(&queue->used_buffers)
3547		    <= QETH_LOW_WATERMARK_PACK) {
3548			/* switch PACKING -> non-PACKING */
3549			QETH_CARD_TEXT(queue->card, 6, "pack->np");
3550			QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3551			queue->do_pack = 0;
3552			return qeth_prep_flush_pack_buffer(queue);
3553		}
3554	}
3555	return 0;
3556}
3557
3558static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3559			       int count)
3560{
3561	struct qeth_qdio_out_buffer *buf = queue->bufs[index];
3562	struct qeth_card *card = queue->card;
3563	unsigned int frames, usecs;
3564	struct qaob *aob = NULL;
3565	int rc;
3566	int i;
3567
3568	for (i = index; i < index + count; ++i) {
3569		unsigned int bidx = QDIO_BUFNR(i);
3570		struct sk_buff *skb;
3571
3572		buf = queue->bufs[bidx];
3573		buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3574				SBAL_EFLAGS_LAST_ENTRY;
3575		queue->coalesced_frames += buf->frames;
3576
3577		if (IS_IQD(card)) {
3578			skb_queue_walk(&buf->skb_list, skb)
3579				skb_tx_timestamp(skb);
3580		}
3581	}
3582
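	/* For IQD unicast queues with CQ enabled, a single-buffer flush is
	 * completed asynchronously via a QAOB:
	 */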
3583	if (IS_IQD(card)) {
3584		if (card->options.cq == QETH_CQ_ENABLED &&
3585		    !qeth_iqd_is_mcast_queue(card, queue) &&
3586		    count == 1) {
3587			if (!buf->aob)
3588				buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
3589							     GFP_ATOMIC);
3590			if (buf->aob) {
3591				struct qeth_qaob_priv1 *priv;
3592
3593				aob = buf->aob;
3594				priv = (struct qeth_qaob_priv1 *)&aob->user1;
3595				priv->state = QETH_QAOB_ISSUED;
3596				priv->queue_no = queue->queue_no;
3597			}
3598		}
3599	} else {
3600		if (!queue->do_pack) {
3601			if ((atomic_read(&queue->used_buffers) >=
3602				(QETH_HIGH_WATERMARK_PACK -
3603				 QETH_WATERMARK_PACK_FUZZ)) &&
3604			    !atomic_read(&queue->set_pci_flags_count)) {
3605				/* it's likely that we'll go to packing
3606				 * mode soon */
3607				atomic_inc(&queue->set_pci_flags_count);
3608				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3609			}
3610		} else {
3611			if (!atomic_read(&queue->set_pci_flags_count)) {
3612				/*
3613				 * There's no outstanding PCI any more, so we
3614				 * have to request a PCI to be sure it will
3615				 * wake us at some time in the future; then we
3616				 * can flush packed buffers that might still be
3617				 * hanging around, which can happen if no
3618				 * further send was requested by the stack.
3619				 */
3620				atomic_inc(&queue->set_pci_flags_count);
3621				buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3622			}
3623		}
3624	}
3625
3626	QETH_TXQ_STAT_INC(queue, doorbell);
3627	rc = qdio_add_bufs_to_output_queue(CARD_DDEV(card), queue->queue_no,
3628					   index, count, aob);
3629
3630	switch (rc) {
3631	case 0:
3632	case -ENOBUFS:
3633		/* ignore temporary SIGA errors without busy condition */
3634
3635		/* Fake the TX completion interrupt: */
3636		frames = READ_ONCE(queue->max_coalesced_frames);
3637		usecs = READ_ONCE(queue->coalesce_usecs);
3638
3639		if (frames && queue->coalesced_frames >= frames) {
3640			napi_schedule(&queue->napi);
3641			queue->coalesced_frames = 0;
3642			QETH_TXQ_STAT_INC(queue, coal_frames);
3643		} else if (qeth_use_tx_irqs(card) &&
3644			   atomic_read(&queue->used_buffers) >= 32) {
3645			/* Old behaviour carried over from the qdio layer: */
3646			napi_schedule(&queue->napi);
3647			QETH_TXQ_STAT_INC(queue, coal_frames);
3648		} else if (usecs) {
3649			qeth_tx_arm_timer(queue, usecs);
3650		}
3651
3652		break;
3653	default:
3654		QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3655		QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3656		QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3657		QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3658		QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3659
3660		/* This must not happen under normal circumstances. If it
3661		 * happens, something is really wrong -> recover. */
3662		qeth_schedule_recovery(queue->card);
3663	}
3664}
3665
3666static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3667{
3668	qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
3669
3670	queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
3671	queue->prev_hdr = NULL;
3672	queue->bulk_count = 0;
3673}
3674
3675static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3676{
3677	/*
3678	 * check if we have to switch to non-packing mode or if
3679	 * we have to get a pci flag out on the queue
3680	 */
3681	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3682	    !atomic_read(&queue->set_pci_flags_count)) {
3683		unsigned int index, flush_cnt;
3684
3685		spin_lock(&queue->lock);
3686
3687		index = queue->next_buf_to_fill;
3688
3689		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
3690		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
3691			flush_cnt = qeth_prep_flush_pack_buffer(queue);
3692
3693		if (flush_cnt) {
3694			qeth_flush_buffers(queue, index, flush_cnt);
3695			QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3696		}
3697
3698		spin_unlock(&queue->lock);
3699	}
3700}
3701
3702static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
3703{
3704	struct qeth_card *card = (struct qeth_card *)card_ptr;
3705
3706	napi_schedule_irqoff(&card->napi);
3707}
3708
3709int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3710{
3711	int rc;
3712
3713	if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
3714		rc = -1;
3715		goto out;
3716	} else {
3717		if (card->options.cq == cq) {
3718			rc = 0;
3719			goto out;
3720		}
3721
3722		qeth_free_qdio_queues(card);
3723		card->options.cq = cq;
3724		rc = 0;
3725	}
3726out:
3727	return rc;
3728
3729}
3730EXPORT_SYMBOL_GPL(qeth_configure_cq);
3731
3732static void qeth_qdio_handle_aob(struct qeth_card *card, struct qaob *aob)
3733{
3734	struct qeth_qaob_priv1 *priv = (struct qeth_qaob_priv1 *)&aob->user1;
3735	unsigned int queue_no = priv->queue_no;
3736
3737	BUILD_BUG_ON(sizeof(*priv) > ARRAY_SIZE(aob->user1));
3738
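	/* If TX completion already saw this buffer (state was PENDING), kick
	 * the queue's NAPI instance to finalize it; otherwise the regular
	 * completion path will pick up the DONE state:
	 */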
3739	if (xchg(&priv->state, QETH_QAOB_DONE) == QETH_QAOB_PENDING &&
3740	    queue_no < card->qdio.no_out_queues)
3741		napi_schedule(&card->qdio.out_qs[queue_no]->napi);
3742}
3743
3744static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3745				 unsigned int queue, int first_element,
3746				 int count)
3747{
3748	struct qeth_qdio_q *cq = card->qdio.c_q;
3749	int i;
3750	int rc;
3751
3752	QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3753	QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3754	QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3755
3756	if (qdio_err) {
3757		netif_tx_stop_all_queues(card->dev);
3758		qeth_schedule_recovery(card);
3759		return;
3760	}
3761
3762	for (i = first_element; i < first_element + count; ++i) {
3763		struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)];
3764		int e = 0;
3765
3766		while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3767		       buffer->element[e].addr) {
3768			unsigned long phys_aob_addr = buffer->element[e].addr;
3769
3770			qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
3771			++e;
3772		}
3773		qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3774	}
3775	rc = qdio_add_bufs_to_input_queue(CARD_DDEV(card), queue,
3776					  cq->next_buf_to_init, count);
3777	if (rc) {
3778		dev_warn(&card->gdev->dev,
3779			"QDIO reported an error, rc=%i\n", rc);
3780		QETH_CARD_TEXT(card, 2, "qcqherr");
3781	}
3782
3783	cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count);
3784}
3785
3786static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3787				    unsigned int qdio_err, int queue,
3788				    int first_elem, int count,
3789				    unsigned long card_ptr)
3790{
3791	struct qeth_card *card = (struct qeth_card *)card_ptr;
3792
3793	QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3794	QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3795
3796	if (qdio_err)
3797		qeth_schedule_recovery(card);
3798}
3799
3800static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3801				     unsigned int qdio_error, int __queue,
3802				     int first_element, int count,
3803				     unsigned long card_ptr)
3804{
3805	struct qeth_card *card = (struct qeth_card *)card_ptr;
3806
3807	QETH_CARD_TEXT(card, 2, "achkcond");
3808	netif_tx_stop_all_queues(card->dev);
3809	qeth_schedule_recovery(card);
3810}
3811
3812/*
3813 * Note: Function assumes that we have 4 outbound queues.
3814 */
3815static int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3816{
3817	struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3818	u8 tos;
3819
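	/* Queue 0 has the highest priority, so precedence/priority values
	 * are bit-inverted when mapped to a queue number (e.g. with
	 * QETH_PRIO_Q_ING_PREC, TOS 0xc0 -> queue 0, TOS 0x00 -> queue 3):
	 */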
3820	switch (card->qdio.do_prio_queueing) {
3821	case QETH_PRIO_Q_ING_TOS:
3822	case QETH_PRIO_Q_ING_PREC:
3823		switch (vlan_get_protocol(skb)) {
3824		case htons(ETH_P_IP):
3825			tos = ipv4_get_dsfield(ip_hdr(skb));
3826			break;
3827		case htons(ETH_P_IPV6):
3828			tos = ipv6_get_dsfield(ipv6_hdr(skb));
3829			break;
3830		default:
3831			return card->qdio.default_out_queue;
3832		}
3833		if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3834			return ~tos >> 6 & 3;
3835		if (tos & IPTOS_MINCOST)
3836			return 3;
3837		if (tos & IPTOS_RELIABILITY)
3838			return 2;
3839		if (tos & IPTOS_THROUGHPUT)
3840			return 1;
3841		if (tos & IPTOS_LOWDELAY)
3842			return 0;
3843		break;
3844	case QETH_PRIO_Q_ING_SKB:
3845		if (skb->priority > 5)
3846			return 0;
3847		return ~skb->priority >> 1 & 3;
3848	case QETH_PRIO_Q_ING_VLAN:
3849		if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3850			return ~ntohs(veth->h_vlan_TCI) >>
3851			       (VLAN_PRIO_SHIFT + 1) & 3;
3852		break;
3853	case QETH_PRIO_Q_ING_FIXED:
3854		return card->qdio.default_out_queue;
3855	default:
3856		break;
3857	}
3858	return card->qdio.default_out_queue;
3859}
3860
3861/**
3862 * qeth_get_elements_for_frags() -	find number of SBALEs for skb frags.
3863 * @skb:				SKB address
3864 *
3865 * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3866 * the fragmented part of the SKB. Returns zero for a linear SKB.
3867 */
3868static int qeth_get_elements_for_frags(struct sk_buff *skb)
3869{
3870	int cnt, elements = 0;
3871
3872	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3873		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3874
3875		elements += qeth_get_elements_for_range(
3876			(addr_t)skb_frag_address(frag),
3877			(addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3878	}
3879	return elements;
3880}
3881
3882/**
3883 * qeth_count_elements() -	Counts the number of QDIO buffer elements needed
3884 *				to transmit an skb.
3885 * @skb:			the skb to operate on.
3886 * @data_offset:		skip this part of the skb's linear data
3887 *
3888 * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3889 * skb's data (both its linear part and paged fragments).
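 * A contiguous range that touches three pages, for example, requires three
 * elements.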
3890 */
3891static unsigned int qeth_count_elements(struct sk_buff *skb,
3892					unsigned int data_offset)
3893{
3894	unsigned int elements = qeth_get_elements_for_frags(skb);
3895	addr_t end = (addr_t)skb->data + skb_headlen(skb);
3896	addr_t start = (addr_t)skb->data + data_offset;
3897
3898	if (start != end)
3899		elements += qeth_get_elements_for_range(start, end);
3900	return elements;
3901}
3902
3903#define QETH_HDR_CACHE_OBJ_SIZE		(sizeof(struct qeth_hdr_tso) + \
3904					 MAX_TCP_HEADER)
3905
3906/**
3907 * qeth_add_hw_header() - add a HW header to an skb.
3908 * @queue: TX queue that the skb will be placed on.
3909 * @skb: skb that the HW header should be added to.
3910 * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3911 *	 it contains a valid pointer to a qeth_hdr.
3912 * @hdr_len: length of the HW header.
3913 * @proto_len: length of protocol headers that need to be in same page as the
3914 *	       HW header.
3915 * @elements: returns the required number of buffer elements for this skb.
3916 *
3917 * Returns the pushed length. If the header can't be pushed on
3918 * (e.g. because it would cross a page boundary), it is allocated from
3919 * the cache instead and 0 is returned.
3920 * The number of needed buffer elements is returned in @elements.
3921 * A failure to create the header is indicated by a return value < 0.
3922 */
3923static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3924			      struct sk_buff *skb, struct qeth_hdr **hdr,
3925			      unsigned int hdr_len, unsigned int proto_len,
3926			      unsigned int *elements)
3927{
3928	gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
3929	const unsigned int contiguous = proto_len ? proto_len : 1;
3930	const unsigned int max_elements = queue->max_elements;
3931	unsigned int __elements;
3932	addr_t start, end;
3933	bool push_ok;
3934	int rc;
3935
3936check_layout:
3937	start = (addr_t)skb->data - hdr_len;
3938	end = (addr_t)skb->data;
3939
3940	if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3941		/* Push HW header into same page as first protocol header. */
3942		push_ok = true;
3943		/* ... but TSO always needs a separate element for headers: */
3944		if (skb_is_gso(skb))
3945			__elements = 1 + qeth_count_elements(skb, proto_len);
3946		else
3947			__elements = qeth_count_elements(skb, 0);
3948	} else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3949		/* Push HW header into preceding page, flush with skb->data. */
3950		push_ok = true;
3951		__elements = 1 + qeth_count_elements(skb, 0);
3952	} else {
3953		/* Use header cache, copy protocol headers up. */
3954		push_ok = false;
3955		__elements = 1 + qeth_count_elements(skb, proto_len);
3956	}
3957
3958	/* Compress skb to fit into one IO buffer: */
3959	if (__elements > max_elements) {
3960		if (!skb_is_nonlinear(skb)) {
3961			/* Drop it, no easy way of shrinking it further. */
3962			QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3963					 max_elements, __elements, skb->len);
3964			return -E2BIG;
3965		}
3966
3967		rc = skb_linearize(skb);
3968		if (rc) {
3969			QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3970			return rc;
3971		}
3972
3973		QETH_TXQ_STAT_INC(queue, skbs_linearized);
3974		/* Linearization changed the layout, re-evaluate: */
3975		goto check_layout;
3976	}
3977
3978	*elements = __elements;
3979	/* Add the header: */
3980	if (push_ok) {
3981		*hdr = skb_push(skb, hdr_len);
3982		return hdr_len;
3983	}
3984
3985	/* Fall back to cache element with known-good alignment: */
3986	if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3987		return -E2BIG;
3988	*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
3989	if (!*hdr)
3990		return -ENOMEM;
3991	/* Copy protocol headers behind HW header: */
3992	skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3993	return 0;
3994}
3995
3996static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3997			      struct sk_buff *curr_skb,
3998			      struct qeth_hdr *curr_hdr)
3999{
4000	struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
4001	struct qeth_hdr *prev_hdr = queue->prev_hdr;
4002
4003	if (!prev_hdr)
4004		return true;
4005
4006	/* All packets must have the same target: */
4007	if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
4008		struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
4009
4010		return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
4011					eth_hdr(curr_skb)->h_dest) &&
4012		       qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
4013	}
4014
4015	return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
4016	       qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
4017}
4018
4019/**
4020 * qeth_fill_buffer() - map skb into an output buffer
4021 * @buf:	buffer to transport the skb
4022 * @skb:	skb to map into the buffer
4023 * @hdr:	qeth_hdr for this skb. Either at skb->data, or allocated
4024 *		from qeth_core_header_cache.
4025 * @offset:	when mapping the skb, start at skb->data + offset
4026 * @hd_len:	if > 0, build a dedicated header element of this size
4027 */
4028static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
4029				     struct sk_buff *skb, struct qeth_hdr *hdr,
4030				     unsigned int offset, unsigned int hd_len)
4031{
4032	struct qdio_buffer *buffer = buf->buffer;
4033	int element = buf->next_element_to_fill;
4034	int length = skb_headlen(skb) - offset;
4035	char *data = skb->data + offset;
4036	unsigned int elem_length, cnt;
4037	bool is_first_elem = true;
4038
4039	__skb_queue_tail(&buf->skb_list, skb);
4040
4041	/* build dedicated element for HW Header */
4042	if (hd_len) {
4043		is_first_elem = false;
4044
4045		buffer->element[element].addr = virt_to_phys(hdr);
4046		buffer->element[element].length = hd_len;
4047		buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
4048
4049		/* HW header is allocated from cache: */
4050		if ((void *)hdr != skb->data)
4051			__set_bit(element, buf->from_kmem_cache);
4052		/* HW header was pushed and is contiguous with linear part: */
4053		else if (length > 0 && !PAGE_ALIGNED(data) &&
4054			 (data == (char *)hdr + hd_len))
4055			buffer->element[element].eflags |=
4056				SBAL_EFLAGS_CONTIGUOUS;
4057
4058		element++;
4059	}
4060
4061	/* map linear part into buffer element(s) */
4062	while (length > 0) {
4063		elem_length = min_t(unsigned int, length,
4064				    PAGE_SIZE - offset_in_page(data));
4065
4066		buffer->element[element].addr = virt_to_phys(data);
4067		buffer->element[element].length = elem_length;
4068		length -= elem_length;
4069		if (is_first_elem) {
4070			is_first_elem = false;
4071			if (length || skb_is_nonlinear(skb))
4072				/* skb needs additional elements */
4073				buffer->element[element].eflags =
4074					SBAL_EFLAGS_FIRST_FRAG;
4075			else
4076				buffer->element[element].eflags = 0;
4077		} else {
4078			buffer->element[element].eflags =
4079				SBAL_EFLAGS_MIDDLE_FRAG;
4080		}
4081
4082		data += elem_length;
4083		element++;
4084	}
4085
4086	/* map page frags into buffer element(s) */
4087	for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
4088		skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
4089
4090		data = skb_frag_address(frag);
4091		length = skb_frag_size(frag);
4092		while (length > 0) {
4093			elem_length = min_t(unsigned int, length,
4094					    PAGE_SIZE - offset_in_page(data));
4095
4096			buffer->element[element].addr = virt_to_phys(data);
4097			buffer->element[element].length = elem_length;
4098			buffer->element[element].eflags =
4099				SBAL_EFLAGS_MIDDLE_FRAG;
4100
4101			length -= elem_length;
4102			data += elem_length;
4103			element++;
4104		}
4105	}
4106
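	/* If the last mapped element still carries a FIRST/MIDDLE_FRAG
	 * flag, re-mark it as the final fragment of this skb:
	 */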
4107	if (buffer->element[element - 1].eflags)
4108		buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
4109	buf->next_element_to_fill = element;
4110	return element;
4111}
4112
4113static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
4114		       struct sk_buff *skb, unsigned int elements,
4115		       struct qeth_hdr *hdr, unsigned int offset,
4116		       unsigned int hd_len)
4117{
4118	unsigned int bytes = qdisc_pkt_len(skb);
4119	struct qeth_qdio_out_buffer *buffer;
4120	unsigned int next_element;
4121	struct netdev_queue *txq;
4122	bool stopped = false;
4123	bool flush;
4124
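	/* IQD TX: skbs for the same target may be bulked into a shared
	 * buffer. bulk_start and bulk_count track the buffers that have
	 * been filled but not yet flushed to the device.
	 */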
4125	buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
4126	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4127
4128	/* Just a sanity check, the wake/stop logic should ensure that we always
4129	 * get a free buffer.
4130	 */
4131	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4132		return -EBUSY;
4133
4134	flush = !qeth_iqd_may_bulk(queue, skb, hdr);
4135
4136	if (flush ||
4137	    (buffer->next_element_to_fill + elements > queue->max_elements)) {
4138		if (buffer->next_element_to_fill > 0) {
4139			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4140			queue->bulk_count++;
4141		}
4142
4143		if (queue->bulk_count >= queue->bulk_max)
4144			flush = true;
4145
4146		if (flush)
4147			qeth_flush_queue(queue);
4148
4149		buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
4150						queue->bulk_count)];
4151
4152		/* Sanity-check again: */
4153		if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4154			return -EBUSY;
4155	}
4156
4157	if (buffer->next_element_to_fill == 0 &&
4158	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4159	/* If a TX completion happens right _here_ and fails to wake
4160		 * the txq, then our re-check below will catch the race.
4161		 */
4162		QETH_TXQ_STAT_INC(queue, stopped);
4163		netif_tx_stop_queue(txq);
4164		stopped = true;
4165	}
4166
4167	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4168	buffer->bytes += bytes;
4169	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4170	queue->prev_hdr = hdr;
4171
4172	flush = __netdev_tx_sent_queue(txq, bytes,
4173				       !stopped && netdev_xmit_more());
4174
4175	if (flush || next_element >= queue->max_elements) {
4176		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4177		queue->bulk_count++;
4178
4179		if (queue->bulk_count >= queue->bulk_max)
4180			flush = true;
4181
4182		if (flush)
4183			qeth_flush_queue(queue);
4184	}
4185
4186	if (stopped && !qeth_out_queue_is_full(queue))
4187		netif_tx_start_queue(txq);
4188	return 0;
4189}
4190
4191static int qeth_do_send_packet(struct qeth_card *card,
4192			       struct qeth_qdio_out_q *queue,
4193			       struct sk_buff *skb, struct qeth_hdr *hdr,
4194			       unsigned int offset, unsigned int hd_len,
4195			       unsigned int elements_needed)
4196{
4197	unsigned int start_index = queue->next_buf_to_fill;
4198	struct qeth_qdio_out_buffer *buffer;
4199	unsigned int next_element;
4200	struct netdev_queue *txq;
4201	bool stopped = false;
4202	int flush_count = 0;
4203	int do_pack = 0;
4204	int rc = 0;
4205
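	/* OSA TX path: under load the queue switches to "packing" mode,
	 * where several skbs are packed into one QDIO buffer before it
	 * is flushed to the device.
	 */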
4206	buffer = queue->bufs[queue->next_buf_to_fill];
4207
4208	/* Just a sanity check, the wake/stop logic should ensure that we always
4209	 * get a free buffer.
4210	 */
4211	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
4212		return -EBUSY;
4213
4214	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
4215
4216	/* check if we need to switch packing state of this queue */
4217	qeth_switch_to_packing_if_needed(queue);
4218	if (queue->do_pack) {
4219		do_pack = 1;
4220		/* does packet fit in current buffer? */
4221		if (buffer->next_element_to_fill + elements_needed >
4222		    queue->max_elements) {
4223			/* ... no -> set state PRIMED */
4224			atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4225			flush_count++;
4226			queue->next_buf_to_fill =
4227				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4228			buffer = queue->bufs[queue->next_buf_to_fill];
4229
4230			/* We stepped forward, so sanity-check again: */
4231			if (atomic_read(&buffer->state) !=
4232			    QETH_QDIO_BUF_EMPTY) {
4233				qeth_flush_buffers(queue, start_index,
4234							   flush_count);
4235				rc = -EBUSY;
4236				goto out;
4237			}
4238		}
4239	}
4240
4241	if (buffer->next_element_to_fill == 0 &&
4242	    atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
4243	/* If a TX completion happens right _here_ and fails to wake
4244		 * the txq, then our re-check below will catch the race.
4245		 */
4246		QETH_TXQ_STAT_INC(queue, stopped);
4247		netif_tx_stop_queue(txq);
4248		stopped = true;
4249	}
4250
4251	next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
4252	buffer->bytes += qdisc_pkt_len(skb);
4253	buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
4254
4255	if (queue->do_pack)
4256		QETH_TXQ_STAT_INC(queue, skbs_pack);
4257	if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
4258		flush_count++;
4259		atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
4260		queue->next_buf_to_fill =
4261				QDIO_BUFNR(queue->next_buf_to_fill + 1);
4262	}
4263
4264	if (flush_count)
4265		qeth_flush_buffers(queue, start_index, flush_count);
4266
4267out:
4268	if (do_pack)
4269		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
4270
4271	if (stopped && !qeth_out_queue_is_full(queue))
4272		netif_tx_start_queue(txq);
4273	return rc;
4274}
4275
4276static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4277			      unsigned int payload_len, struct sk_buff *skb,
4278			      unsigned int proto_len)
4279{
4280	struct qeth_hdr_ext_tso *ext = &hdr->ext;
4281
4282	ext->hdr_tot_len = sizeof(*ext);
4283	ext->imb_hdr_no = 1;
4284	ext->hdr_type = 1;
4285	ext->hdr_version = 1;
4286	ext->hdr_len = 28;
4287	ext->payload_len = payload_len;
4288	ext->mss = skb_shinfo(skb)->gso_size;
4289	ext->dg_hdr_len = proto_len;
4290}
4291
4292int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
4293	      struct qeth_qdio_out_q *queue, __be16 proto,
4294	      void (*fill_header)(struct qeth_qdio_out_q *queue,
4295				  struct qeth_hdr *hdr, struct sk_buff *skb,
4296				  __be16 proto, unsigned int data_len))
4297{
4298	unsigned int proto_len, hw_hdr_len;
4299	unsigned int frame_len = skb->len;
4300	bool is_tso = skb_is_gso(skb);
4301	unsigned int data_offset = 0;
4302	struct qeth_hdr *hdr = NULL;
4303	unsigned int hd_len = 0;
4304	unsigned int elements;
4305	int push_len, rc;
4306
4307	if (is_tso) {
4308		hw_hdr_len = sizeof(struct qeth_hdr_tso);
4309		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4310	} else {
4311		hw_hdr_len = sizeof(struct qeth_hdr);
4312		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
4313	}
4314
4315	rc = skb_cow_head(skb, hw_hdr_len);
4316	if (rc)
4317		return rc;
4318
4319	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
4320				      &elements);
4321	if (push_len < 0)
4322		return push_len;
4323	if (is_tso || !push_len) {
4324		/* HW header needs its own buffer element. */
4325		hd_len = hw_hdr_len + proto_len;
4326		data_offset = push_len + proto_len;
4327	}
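	/* When the HW header was not pushed (or TSO is used), it occupies
	 * a dedicated buffer element of hd_len bytes together with the
	 * copied protocol headers, and the skb is mapped starting at
	 * data_offset instead.
	 */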
4328	memset(hdr, 0, hw_hdr_len);
4329	fill_header(queue, hdr, skb, proto, frame_len);
4330	if (is_tso)
4331		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
4332				  frame_len - proto_len, skb, proto_len);
4333
4334	if (IS_IQD(card)) {
4335		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
4336				 hd_len);
4337	} else {
4338		/* TODO: drop skb_orphan() once TX completion is fast enough */
4339		skb_orphan(skb);
4340		spin_lock(&queue->lock);
4341		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
4342					 hd_len, elements);
4343		spin_unlock(&queue->lock);
4344	}
4345
4346	if (rc && !push_len)
4347		kmem_cache_free(qeth_core_header_cache, hdr);
4348
4349	return rc;
4350}
4351EXPORT_SYMBOL_GPL(qeth_xmit);
4352
4353static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4354		struct qeth_reply *reply, unsigned long data)
4355{
4356	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4357	struct qeth_ipacmd_setadpparms *setparms;
4358
4359	QETH_CARD_TEXT(card, 4, "prmadpcb");
4360
4361	setparms = &(cmd->data.setadapterparms);
4362	if (qeth_setadpparms_inspect_rc(cmd)) {
4363		QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4364		setparms->data.mode = SET_PROMISC_MODE_OFF;
4365	}
4366	card->info.promisc_mode = setparms->data.mode;
4367	return (cmd->hdr.return_code) ? -EIO : 0;
4368}
4369
4370void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4371{
4372	enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4373						    SET_PROMISC_MODE_OFF;
4374	struct qeth_cmd_buffer *iob;
4375	struct qeth_ipa_cmd *cmd;
4376
4377	QETH_CARD_TEXT(card, 4, "setprom");
4378	QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4379
4380	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4381				   SETADP_DATA_SIZEOF(mode));
4382	if (!iob)
4383		return;
4384	cmd = __ipa_cmd(iob);
4385	cmd->data.setadapterparms.data.mode = mode;
4386	qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4387}
4388EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4389
4390static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4391		struct qeth_reply *reply, unsigned long data)
4392{
4393	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4394	struct qeth_ipacmd_setadpparms *adp_cmd;
4395
4396	QETH_CARD_TEXT(card, 4, "chgmaccb");
4397	if (qeth_setadpparms_inspect_rc(cmd))
4398		return -EIO;
4399
4400	adp_cmd = &cmd->data.setadapterparms;
4401	if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4402		return -EADDRNOTAVAIL;
4403
4404	if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4405	    !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4406		return -EADDRNOTAVAIL;
4407
4408	eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
4409	return 0;
4410}
4411
4412int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4413{
4414	int rc;
4415	struct qeth_cmd_buffer *iob;
4416	struct qeth_ipa_cmd *cmd;
4417
4418	QETH_CARD_TEXT(card, 4, "chgmac");
4419
4420	iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4421				   SETADP_DATA_SIZEOF(change_addr));
4422	if (!iob)
4423		return -ENOMEM;
4424	cmd = __ipa_cmd(iob);
4425	cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4426	cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4427	ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4428			card->dev->dev_addr);
4429	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4430			       NULL);
4431	return rc;
4432}
4433EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4434
4435static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4436		struct qeth_reply *reply, unsigned long data)
4437{
4438	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4439	struct qeth_set_access_ctrl *access_ctrl_req;
4440
4441	QETH_CARD_TEXT(card, 4, "setaccb");
4442
4443	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4444	QETH_CARD_TEXT_(card, 2, "rc=%d",
4445			cmd->data.setadapterparms.hdr.return_code);
4446	if (cmd->data.setadapterparms.hdr.return_code !=
4447						SET_ACCESS_CTRL_RC_SUCCESS)
4448		QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4449				 access_ctrl_req->subcmd_code, CARD_DEVID(card),
4450				 cmd->data.setadapterparms.hdr.return_code);
4451	switch (qeth_setadpparms_inspect_rc(cmd)) {
4452	case SET_ACCESS_CTRL_RC_SUCCESS:
4453		if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE)
4454			dev_info(&card->gdev->dev,
4455			    "QDIO data connection isolation is deactivated\n");
4456		else
4457			dev_info(&card->gdev->dev,
4458			    "QDIO data connection isolation is activated\n");
4459		return 0;
4460	case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4461		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4462				 CARD_DEVID(card));
4463		return 0;
4464	case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4465		QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4466				 CARD_DEVID(card));
4467		return 0;
4468	case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4469		dev_err(&card->gdev->dev, "Adapter does not "
4470			"support QDIO data connection isolation\n");
4471		return -EOPNOTSUPP;
4472	case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4473		dev_err(&card->gdev->dev,
4474			"Adapter is dedicated. "
4475			"QDIO data connection isolation not supported\n");
4476		return -EOPNOTSUPP;
4477	case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4478		dev_err(&card->gdev->dev,
4479			"TSO does not permit QDIO data connection isolation\n");
4480		return -EPERM;
4481	case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4482		dev_err(&card->gdev->dev, "The adjacent switch port does not "
4483			"support reflective relay mode\n");
4484		return -EOPNOTSUPP;
4485	case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4486		dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4487					"enabled at the adjacent switch port\n");
4488		return -EREMOTEIO;
4489	case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4490		dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4491					"at the adjacent switch failed\n");
4492		/* benign error while disabling ISOLATION_MODE_FWD */
4493		return 0;
4494	default:
4495		return -EIO;
4496	}
4497}
4498
4499int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4500				     enum qeth_ipa_isolation_modes mode)
4501{
4502	int rc;
4503	struct qeth_cmd_buffer *iob;
4504	struct qeth_ipa_cmd *cmd;
4505	struct qeth_set_access_ctrl *access_ctrl_req;
4506
4507	QETH_CARD_TEXT(card, 4, "setacctl");
4508
4509	if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4510		dev_err(&card->gdev->dev,
4511			"Adapter does not support QDIO data connection isolation\n");
4512		return -EOPNOTSUPP;
4513	}
4514
4515	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4516				   SETADP_DATA_SIZEOF(set_access_ctrl));
4517	if (!iob)
4518		return -ENOMEM;
4519	cmd = __ipa_cmd(iob);
4520	access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4521	access_ctrl_req->subcmd_code = mode;
4522
4523	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4524			       NULL);
4525	if (rc) {
4526		QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4527		QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d)) on device %x: send failed\n",
4528				 rc, CARD_DEVID(card));
4529	}
4530
4531	return rc;
4532}
4533
4534void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue)
4535{
4536	struct qeth_card *card;
4537
4538	card = dev->ml_priv;
4539	QETH_CARD_TEXT(card, 4, "txtimeo");
4540	qeth_schedule_recovery(card);
4541}
4542EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4543
4544static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4545{
4546	struct qeth_card *card = dev->ml_priv;
4547	int rc = 0;
4548
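	/* qeth devices have no real MDIO bus; synthesize plausible
	 * register values so that generic MII ioctls keep working:
	 */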
4549	switch (regnum) {
4550	case MII_BMCR: /* Basic mode control register */
4551		rc = BMCR_FULLDPLX;
4552		if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4553		    (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4554		    (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4555			rc |= BMCR_SPEED100;
4556		break;
4557	case MII_BMSR: /* Basic mode status register */
4558		rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4559		     BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4560		     BMSR_100BASE4;
4561		break;
4562	case MII_PHYSID1: /* PHYS ID 1 */
4563		rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4564		     dev->dev_addr[2];
4565		rc = (rc >> 5) & 0xFFFF;
4566		break;
4567	case MII_PHYSID2: /* PHYS ID 2 */
4568		rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4569		break;
4570	case MII_ADVERTISE: /* Advertisement control reg */
4571		rc = ADVERTISE_ALL;
4572		break;
4573	case MII_LPA: /* Link partner ability reg */
4574		rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4575		     LPA_100BASE4 | LPA_LPACK;
4576		break;
4577	case MII_EXPANSION: /* Expansion register */
4578		break;
4579	case MII_DCOUNTER: /* disconnect counter */
4580		break;
4581	case MII_FCSCOUNTER: /* false carrier counter */
4582		break;
4583	case MII_NWAYTEST: /* N-way auto-neg test register */
4584		break;
4585	case MII_RERRCOUNTER: /* rx error counter */
4586		rc = card->stats.rx_length_errors +
4587		     card->stats.rx_frame_errors +
4588		     card->stats.rx_fifo_errors;
4589		break;
4590	case MII_SREVISION: /* silicon revision */
4591		break;
4592	case MII_RESV1: /* reserved 1 */
4593		break;
4594	case MII_LBRERROR: /* loopback, rx, bypass error */
4595		break;
4596	case MII_PHYADDR: /* physical address */
4597		break;
4598	case MII_RESV2: /* reserved 2 */
4599		break;
4600	case MII_TPISTATUS: /* TPI status for 10mbps */
4601		break;
4602	case MII_NCONFIG: /* network interface config */
4603		break;
4604	default:
4605		break;
4606	}
4607	return rc;
4608}
4609
4610static int qeth_snmp_command_cb(struct qeth_card *card,
4611				struct qeth_reply *reply, unsigned long data)
4612{
4613	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4614	struct qeth_arp_query_info *qinfo = reply->param;
4615	struct qeth_ipacmd_setadpparms *adp_cmd;
4616	unsigned int data_len;
4617	void *snmp_data;
4618
4619	QETH_CARD_TEXT(card, 3, "snpcmdcb");
4620
4621	if (cmd->hdr.return_code) {
4622		QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
4623		return -EIO;
4624	}
4625	if (cmd->data.setadapterparms.hdr.return_code) {
4626		cmd->hdr.return_code =
4627			cmd->data.setadapterparms.hdr.return_code;
4628		QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
4629		return -EIO;
4630	}
4631
4632	adp_cmd = &cmd->data.setadapterparms;
4633	data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
4634	if (adp_cmd->hdr.seq_no == 1) {
4635		snmp_data = &adp_cmd->data.snmp;
4636	} else {
4637		snmp_data = &adp_cmd->data.snmp.request;
4638		data_len -= offsetof(struct qeth_snmp_cmd, request);
4639	}
4640
4641	/* check if there is enough room in userspace */
4642	if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
4643		QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
4644		return -ENOSPC;
4645	}
4646	QETH_CARD_TEXT_(card, 4, "snore%i",
4647			cmd->data.setadapterparms.hdr.used_total);
4648	QETH_CARD_TEXT_(card, 4, "sseqn%i",
4649			cmd->data.setadapterparms.hdr.seq_no);
4650	/* copy entries to user buffer */
4651	memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
4652	qinfo->udata_offset += data_len;
4653
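	/* Returning 1 keeps the command pending: more reply parts are
	 * expected while seq_no is still below used_total.
	 */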
4654	if (cmd->data.setadapterparms.hdr.seq_no <
4655	    cmd->data.setadapterparms.hdr.used_total)
4656		return 1;
4657	return 0;
4658}
4659
4660static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
4661{
4662	struct qeth_snmp_ureq __user *ureq;
4663	struct qeth_cmd_buffer *iob;
4664	unsigned int req_len;
4665	struct qeth_arp_query_info qinfo = {0, };
4666	int rc = 0;
4667
4668	QETH_CARD_TEXT(card, 3, "snmpcmd");
4669
4670	if (IS_VM_NIC(card))
4671		return -EOPNOTSUPP;
4672
4673	if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
4674	    IS_LAYER3(card))
4675		return -EOPNOTSUPP;
4676
4677	ureq = (struct qeth_snmp_ureq __user *) udata;
4678	if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
4679	    get_user(req_len, &ureq->hdr.req_len))
4680		return -EFAULT;
4681
4682	/* Sanitize user input, to avoid overflows in iob size calculation: */
4683	if (req_len > QETH_BUFSIZE)
4684		return -EINVAL;
4685
4686	iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
4687	if (!iob)
4688		return -ENOMEM;
4689
4690	if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
4691			   &ureq->cmd, req_len)) {
4692		qeth_put_cmd(iob);
4693		return -EFAULT;
4694	}
4695
4696	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
4697	if (!qinfo.udata) {
4698		qeth_put_cmd(iob);
4699		return -ENOMEM;
4700	}
4701	qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);
4702
4703	rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
4704	if (rc) {
4705		QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
4706				 CARD_DEVID(card), rc);
4707	} else {
4708		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
4709			rc = -EFAULT;
4710	}
4711
4712	kfree(qinfo.udata);
4713	return rc;
4714}
4715
4716static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
4717					 struct qeth_reply *reply,
4718					 unsigned long data)
4719{
4720	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4721	struct qeth_qoat_priv *priv = reply->param;
4722	int resdatalen;
4723
4724	QETH_CARD_TEXT(card, 3, "qoatcb");
4725	if (qeth_setadpparms_inspect_rc(cmd))
4726		return -EIO;
4727
4728	resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
4729
4730	if (resdatalen > (priv->buffer_len - priv->response_len))
4731		return -ENOSPC;
4732
4733	memcpy(priv->buffer + priv->response_len,
4734	       &cmd->data.setadapterparms.hdr, resdatalen);
4735	priv->response_len += resdatalen;
4736
4737	if (cmd->data.setadapterparms.hdr.seq_no <
4738	    cmd->data.setadapterparms.hdr.used_total)
4739		return 1;
4740	return 0;
4741}
4742
4743static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
4744{
4745	int rc = 0;
4746	struct qeth_cmd_buffer *iob;
4747	struct qeth_ipa_cmd *cmd;
4748	struct qeth_query_oat *oat_req;
4749	struct qeth_query_oat_data oat_data;
4750	struct qeth_qoat_priv priv;
4751	void __user *tmp;
4752
4753	QETH_CARD_TEXT(card, 3, "qoatcmd");
4754
4755	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT))
4756		return -EOPNOTSUPP;
4757
4758	if (copy_from_user(&oat_data, udata, sizeof(oat_data)))
4759		return -EFAULT;
4760
4761	priv.buffer_len = oat_data.buffer_len;
4762	priv.response_len = 0;
4763	priv.buffer = vzalloc(oat_data.buffer_len);
4764	if (!priv.buffer)
4765		return -ENOMEM;
4766
4767	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4768				   SETADP_DATA_SIZEOF(query_oat));
4769	if (!iob) {
4770		rc = -ENOMEM;
4771		goto out_free;
4772	}
4773	cmd = __ipa_cmd(iob);
4774	oat_req = &cmd->data.setadapterparms.data.query_oat;
4775	oat_req->subcmd_code = oat_data.command;
4776
4777	rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv);
4778	if (!rc) {
4779		tmp = is_compat_task() ? compat_ptr(oat_data.ptr) :
4780					 u64_to_user_ptr(oat_data.ptr);
4781		oat_data.response_len = priv.response_len;
4782
4783		if (copy_to_user(tmp, priv.buffer, priv.response_len) ||
4784		    copy_to_user(udata, &oat_data, sizeof(oat_data)))
4785			rc = -EFAULT;
4786	}
4787
4788out_free:
4789	vfree(priv.buffer);
4790	return rc;
4791}
4792
4793static int qeth_init_link_info_oat_cb(struct qeth_card *card,
4794				      struct qeth_reply *reply_priv,
4795				      unsigned long data)
4796{
4797	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4798	struct qeth_link_info *link_info = reply_priv->param;
4799	struct qeth_query_oat_physical_if *phys_if;
4800	struct qeth_query_oat_reply *reply;
4801
4802	QETH_CARD_TEXT(card, 2, "qoatincb");
4803	if (qeth_setadpparms_inspect_rc(cmd))
4804		return -EIO;
4805
4806	/* Multi-part reply is unexpected, don't bother: */
4807	if (cmd->data.setadapterparms.hdr.used_total > 1)
4808		return -EINVAL;
4809
4810	/* Expect the reply to start with phys_if data: */
4811	reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
4812	if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
4813	    reply->length < sizeof(*reply))
4814		return -EINVAL;
4815
4816	phys_if = &reply->phys_if;
4817
4818	switch (phys_if->speed_duplex) {
4819	case QETH_QOAT_PHYS_SPEED_10M_HALF:
4820		link_info->speed = SPEED_10;
4821		link_info->duplex = DUPLEX_HALF;
4822		break;
4823	case QETH_QOAT_PHYS_SPEED_10M_FULL:
4824		link_info->speed = SPEED_10;
4825		link_info->duplex = DUPLEX_FULL;
4826		break;
4827	case QETH_QOAT_PHYS_SPEED_100M_HALF:
4828		link_info->speed = SPEED_100;
4829		link_info->duplex = DUPLEX_HALF;
4830		break;
4831	case QETH_QOAT_PHYS_SPEED_100M_FULL:
4832		link_info->speed = SPEED_100;
4833		link_info->duplex = DUPLEX_FULL;
4834		break;
4835	case QETH_QOAT_PHYS_SPEED_1000M_HALF:
4836		link_info->speed = SPEED_1000;
4837		link_info->duplex = DUPLEX_HALF;
4838		break;
4839	case QETH_QOAT_PHYS_SPEED_1000M_FULL:
4840		link_info->speed = SPEED_1000;
4841		link_info->duplex = DUPLEX_FULL;
4842		break;
4843	case QETH_QOAT_PHYS_SPEED_10G_FULL:
4844		link_info->speed = SPEED_10000;
4845		link_info->duplex = DUPLEX_FULL;
4846		break;
4847	case QETH_QOAT_PHYS_SPEED_25G_FULL:
4848		link_info->speed = SPEED_25000;
4849		link_info->duplex = DUPLEX_FULL;
4850		break;
4851	case QETH_QOAT_PHYS_SPEED_UNKNOWN:
4852	default:
4853		link_info->speed = SPEED_UNKNOWN;
4854		link_info->duplex = DUPLEX_UNKNOWN;
4855		break;
4856	}
4857
4858	switch (phys_if->media_type) {
4859	case QETH_QOAT_PHYS_MEDIA_COPPER:
4860		link_info->port = PORT_TP;
4861		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4862		break;
4863	case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
4864		link_info->port = PORT_FIBRE;
4865		link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
4866		break;
4867	case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
4868		link_info->port = PORT_FIBRE;
4869		link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
4870		break;
4871	default:
4872		link_info->port = PORT_OTHER;
4873		link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
4874		break;
4875	}
4876
4877	return 0;
4878}
4879
4880static void qeth_init_link_info(struct qeth_card *card)
4881{
4882	qeth_default_link_info(card);
4883
4884	/* Get more accurate data via QUERY OAT: */
4885	if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
4886		struct qeth_link_info link_info;
4887		struct qeth_cmd_buffer *iob;
4888
4889		iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
4890					   SETADP_DATA_SIZEOF(query_oat));
4891		if (iob) {
4892			struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
4893			struct qeth_query_oat *oat_req;
4894
4895			oat_req = &cmd->data.setadapterparms.data.query_oat;
4896			oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
4897
4898			if (!qeth_send_ipa_cmd(card, iob,
4899					       qeth_init_link_info_oat_cb,
4900					       &link_info)) {
4901				if (link_info.speed != SPEED_UNKNOWN)
4902					card->info.link_info.speed = link_info.speed;
4903				if (link_info.duplex != DUPLEX_UNKNOWN)
4904					card->info.link_info.duplex = link_info.duplex;
4905				if (link_info.port != PORT_OTHER)
4906					card->info.link_info.port = link_info.port;
4907				if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
4908					card->info.link_info.link_mode = link_info.link_mode;
4909			}
4910		}
4911	}
4912}
4913
4914/**
4915 * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4916 * @card: pointer to a qeth_card
4917 *
4918 * Returns:
4919 *	0, if a MAC address has been set for the card's netdevice
4920 *	an error code, for various error conditions
4921 */
4922int qeth_vm_request_mac(struct qeth_card *card)
4923{
4924	struct diag26c_mac_resp *response;
4925	struct diag26c_mac_req *request;
4926	int rc;
4927
4928	QETH_CARD_TEXT(card, 2, "vmreqmac");
4929
4930	request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4931	response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4932	if (!request || !response) {
4933		rc = -ENOMEM;
4934		goto out;
4935	}
4936
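	/* DIAG 0x26C is a z/VM service; its GET_MAC operation returns the
	 * hypervisor-managed MAC address for this device number.
	 */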
4937	request->resp_buf_len = sizeof(*response);
4938	request->resp_version = DIAG26C_VERSION2;
4939	request->op_code = DIAG26C_GET_MAC;
4940	request->devno = card->info.ddev_devno;
4941
4942	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4943	rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4944	QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4945	if (rc)
4946		goto out;
4947	QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4948
4949	if (request->resp_buf_len < sizeof(*response) ||
4950	    response->version != request->resp_version) {
4951		rc = -EIO;
4952		QETH_CARD_TEXT(card, 2, "badresp");
4953		QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4954			      sizeof(request->resp_buf_len));
4955	} else if (!is_valid_ether_addr(response->mac)) {
4956		rc = -EINVAL;
4957		QETH_CARD_TEXT(card, 2, "badmac");
4958		QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4959	} else {
4960		eth_hw_addr_set(card->dev, response->mac);
4961	}
4962
4963out:
4964	kfree(response);
4965	kfree(request);
4966	return rc;
4967}
4968EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4969
4970static void qeth_determine_capabilities(struct qeth_card *card)
4971{
4972	struct qeth_channel *channel = &card->data;
4973	struct ccw_device *ddev = channel->ccwdev;
4974	int rc;
4975	int ddev_offline = 0;
4976
4977	QETH_CARD_TEXT(card, 2, "detcapab");
4978	if (!ddev->online) {
4979		ddev_offline = 1;
4980		rc = qeth_start_channel(channel);
4981		if (rc) {
4982			QETH_CARD_TEXT_(card, 2, "3err%d", rc);
4983			goto out;
4984		}
4985	}
4986
4987	rc = qeth_read_conf_data(card);
4988	if (rc) {
4989		QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
4990				 CARD_DEVID(card), rc);
4991		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
4992		goto out_offline;
4993	}
4994
4995	rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
4996	if (rc)
4997		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
4998
4999	QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
5000	QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
5001	QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
5002	QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
5003	QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
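	/* Completion Queueing requires an IQD-format queue with the
	 * matching qdioac capability bits:
	 */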
5004	if (card->ssqd.qfmt == QDIO_IQDIO_QFMT &&
5005	    (card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) &&
5006	    (card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE)) {
5007		dev_info(&card->gdev->dev,
5008			"Completion Queueing supported\n");
5009	} else {
5010		card->options.cq = QETH_CQ_NOTAVAILABLE;
5011	}
5012
5013out_offline:
5014	if (ddev_offline == 1)
5015		qeth_stop_channel(channel);
5016out:
5017	return;
5018}
5019
5020static void qeth_read_ccw_conf_data(struct qeth_card *card)
5021{
5022	struct qeth_card_info *info = &card->info;
5023	struct ccw_device *cdev = CARD_DDEV(card);
5024	struct ccw_dev_id dev_id;
5025
5026	QETH_CARD_TEXT(card, 2, "ccwconfd");
5027	ccw_device_get_id(cdev, &dev_id);
5028
5029	info->ddev_devno = dev_id.devno;
5030	info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
5031			  !ccw_device_get_iid(cdev, &info->iid) &&
5032			  !ccw_device_get_chid(cdev, 0, &info->chid);
5033	info->ssid = dev_id.ssid;
5034
5035	dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
5036		 info->chid, info->chpid);
5037
5038	QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
5039	QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
5040	QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
5041	QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
5042	QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
5043	QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
5044	QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
5045}
5046
5047static int qeth_qdio_establish(struct qeth_card *card)
5048{
5049	struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
5050	struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
5051	struct qeth_qib_parms *qib_parms = NULL;
5052	struct qdio_initialize init_data;
5053	unsigned int no_input_qs = 1;
5054	unsigned int i;
5055	int rc = 0;
5056
5057	QETH_CARD_TEXT(card, 2, "qdioest");
5058
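	/* QIB parameters are only filled in for real (non-IQD, non-z/VM)
	 * devices:
	 */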
5059	if (!IS_IQD(card) && !IS_VM_NIC(card)) {
5060		qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
5061		if (!qib_parms)
5062			return -ENOMEM;
5063
5064		qeth_fill_qib_parms(card, qib_parms);
5065	}
5066
5067	in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
5068	if (card->options.cq == QETH_CQ_ENABLED) {
5069		in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
5070		no_input_qs++;
5071	}
5072
5073	for (i = 0; i < card->qdio.no_out_queues; i++)
5074		out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
5075
5076	memset(&init_data, 0, sizeof(struct qdio_initialize));
5077	init_data.q_format		 = IS_IQD(card) ? QDIO_IQDIO_QFMT :
5078							  QDIO_QETH_QFMT;
5079	init_data.qib_param_field_format = 0;
5080	init_data.qib_param_field	 = (void *)qib_parms;
5081	init_data.no_input_qs		 = no_input_qs;
5082	init_data.no_output_qs           = card->qdio.no_out_queues;
5083	init_data.input_handler		 = qeth_qdio_input_handler;
5084	init_data.output_handler	 = qeth_qdio_output_handler;
5085	init_data.irq_poll		 = qeth_qdio_poll;
5086	init_data.int_parm               = (unsigned long) card;
5087	init_data.input_sbal_addr_array  = in_sbal_ptrs;
5088	init_data.output_sbal_addr_array = out_sbal_ptrs;
5089
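	/* Advance the setup state machine; only the transition from
	 * ALLOCATED performs the actual qdio_allocate() and
	 * qdio_establish():
	 */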
5090	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
5091		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
5092		rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
5093				   init_data.no_output_qs);
5094		if (rc) {
5095			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5096			goto out;
5097		}
5098		rc = qdio_establish(CARD_DDEV(card), &init_data);
5099		if (rc) {
5100			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
5101			qdio_free(CARD_DDEV(card));
5102		}
5103	}
5104
5105	switch (card->options.cq) {
5106	case QETH_CQ_ENABLED:
5107		dev_info(&card->gdev->dev, "Completion Queue support enabled\n");
5108		break;
5109	case QETH_CQ_DISABLED:
5110		dev_info(&card->gdev->dev, "Completion Queue support disabled\n");
5111		break;
5112	default:
5113		break;
5114	}
5115
5116out:
5117	kfree(qib_parms);
5118	return rc;
5119}
5120
5121static void qeth_core_free_card(struct qeth_card *card)
5122{
5123	QETH_CARD_TEXT(card, 2, "freecrd");
5124
5125	unregister_service_level(&card->qeth_service_level);
5126	debugfs_remove_recursive(card->debugfs);
5127	qeth_put_cmd(card->read_cmd);
5128	destroy_workqueue(card->event_wq);
5129	dev_set_drvdata(&card->gdev->dev, NULL);
5130	kfree(card);
5131}
5132
5133static void qeth_trace_features(struct qeth_card *card)
5134{
5135	QETH_CARD_TEXT(card, 2, "features");
5136	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
5137	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
5138	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
5139	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
5140		      sizeof(card->info.diagass_support));
5141}
5142
5143static struct ccw_device_id qeth_ids[] = {
5144	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
5145					.driver_info = QETH_CARD_TYPE_OSD},
5146	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
5147					.driver_info = QETH_CARD_TYPE_IQD},
5148	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
5149					.driver_info = QETH_CARD_TYPE_OSM},
5150#ifdef CONFIG_QETH_OSX
5151	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
5152					.driver_info = QETH_CARD_TYPE_OSX},
5153#endif
5154	{},
5155};
5156MODULE_DEVICE_TABLE(ccw, qeth_ids);
5157
5158static struct ccw_driver qeth_ccw_driver = {
5159	.driver = {
5160		.owner = THIS_MODULE,
5161		.name = "qeth",
5162	},
5163	.ids = qeth_ids,
5164	.probe = ccwgroup_probe_ccwdev,
5165	.remove = ccwgroup_remove_ccwdev,
5166};
5167
5168static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
5169{
5170	int retries = 3;
5171	int rc;
5172
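	/* Channel and IDX activation are retried up to three times before
	 * the setup is reported as failed.
	 */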
5173	QETH_CARD_TEXT(card, 2, "hrdsetup");
5174	atomic_set(&card->force_alloc_skb, 0);
5175	rc = qeth_update_from_chp_desc(card);
5176	if (rc)
5177		return rc;
5178retry:
5179	if (retries < 3)
5180		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
5181				 CARD_DEVID(card));
5182	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
5183	qeth_stop_channel(&card->data);
5184	qeth_stop_channel(&card->write);
5185	qeth_stop_channel(&card->read);
5186	qdio_free(CARD_DDEV(card));
5187
5188	rc = qeth_start_channel(&card->read);
5189	if (rc)
5190		goto retriable;
5191	rc = qeth_start_channel(&card->write);
5192	if (rc)
5193		goto retriable;
5194	rc = qeth_start_channel(&card->data);
5195	if (rc)
5196		goto retriable;
5197retriable:
5198	if (rc == -ERESTARTSYS) {
5199		QETH_CARD_TEXT(card, 2, "break1");
5200		return rc;
5201	} else if (rc) {
5202		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5203		if (--retries < 0)
5204			goto out;
5205		else
5206			goto retry;
5207	}
5208
5209	qeth_determine_capabilities(card);
5210	qeth_read_ccw_conf_data(card);
5211	qeth_idx_init(card);
5212
5213	rc = qeth_idx_activate_read_channel(card);
5214	if (rc == -EINTR) {
5215		QETH_CARD_TEXT(card, 2, "break2");
5216		return rc;
5217	} else if (rc) {
5218		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
5219		if (--retries < 0)
5220			goto out;
5221		else
5222			goto retry;
5223	}
5224
5225	rc = qeth_idx_activate_write_channel(card);
5226	if (rc == -EINTR) {
5227		QETH_CARD_TEXT(card, 2, "break3");
5228		return rc;
5229	} else if (rc) {
5230		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
5231		if (--retries < 0)
5232			goto out;
5233		else
5234			goto retry;
5235	}
5236	card->read_or_write_problem = 0;
5237	rc = qeth_mpc_initialize(card);
5238	if (rc) {
5239		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
5240		goto out;
5241	}
5242
5243	rc = qeth_send_startlan(card);
5244	if (rc) {
5245		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
5246		if (rc == -ENETDOWN) {
5247			dev_warn(&card->gdev->dev, "The LAN is offline\n");
5248			*carrier_ok = false;
5249		} else {
5250			goto out;
5251		}
5252	} else {
5253		*carrier_ok = true;
5254	}
5255
5256	card->options.ipa4.supported = 0;
5257	card->options.ipa6.supported = 0;
5258	card->options.adp.supported = 0;
5259	card->options.sbp.supported_funcs = 0;
5260	card->info.diagass_support = 0;
5261	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
5262	if (rc == -ENOMEM)
5263		goto out;
5264	if (qeth_is_supported(card, IPA_IPV6)) {
5265		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
5266		if (rc == -ENOMEM)
5267			goto out;
5268	}
5269	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
5270		rc = qeth_query_setadapterparms(card);
5271		if (rc < 0) {
5272			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
5273			goto out;
5274		}
5275	}
5276	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
5277		rc = qeth_query_setdiagass(card);
5278		if (rc)
5279			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
5280	}
5281
5282	qeth_trace_features(card);
5283
5284	if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
5285	    (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
5286		card->info.hwtrap = 0;
5287
5288	if (card->options.isolation != ISOLATION_MODE_NONE) {
5289		rc = qeth_setadpparms_set_access_ctrl(card,
5290						      card->options.isolation);
5291		if (rc)
5292			goto out;
5293	}
5294
5295	qeth_init_link_info(card);
5296
5297	rc = qeth_init_qdio_queues(card);
5298	if (rc) {
5299		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
5300		goto out;
5301	}
5302
5303	return 0;
5304out:
5305	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
5306		"an error on the device\n");
5307	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
5308			 CARD_DEVID(card), rc);
5309	return rc;
5310}
5311
5312static int qeth_set_online(struct qeth_card *card,
5313			   const struct qeth_discipline *disc)
5314{
5315	bool carrier_ok;
5316	int rc;
5317
5318	mutex_lock(&card->conf_mutex);
5319	QETH_CARD_TEXT(card, 2, "setonlin");
5320
5321	rc = qeth_hardsetup_card(card, &carrier_ok);
5322	if (rc) {
5323		QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
5324		rc = -ENODEV;
5325		goto err_hardsetup;
5326	}
5327
5328	qeth_print_status_message(card);
5329
5330	if (card->dev->reg_state != NETREG_REGISTERED)
5331		/* no need for locking / error handling at this early stage: */
5332		qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
5333
5334	rc = disc->set_online(card, carrier_ok);
5335	if (rc)
5336		goto err_online;
5337
5338	/* let user_space know that device is online */
5339	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5340
5341	mutex_unlock(&card->conf_mutex);
5342	return 0;
5343
5344err_online:
5345err_hardsetup:
5346	qeth_qdio_clear_card(card, 0);
5347	qeth_clear_working_pool_list(card);
5348	qeth_flush_local_addrs(card);
5349
5350	qeth_stop_channel(&card->data);
5351	qeth_stop_channel(&card->write);
5352	qeth_stop_channel(&card->read);
5353	qdio_free(CARD_DDEV(card));
5354
5355	mutex_unlock(&card->conf_mutex);
5356	return rc;
5357}
5358
5359int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
5360		     bool resetting)
5361{
5362	int rc, rc2, rc3;
5363
5364	mutex_lock(&card->conf_mutex);
5365	QETH_CARD_TEXT(card, 3, "setoffl");
5366
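	/* Disarm the HW trap when taking an armed device offline outside
	 * of recovery; hwtrap == 2 forces the disarm even while resetting.
	 */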
5367	if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) {
5368		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
5369		card->info.hwtrap = 1;
5370	}
5371
5372	/* cancel any stalled cmd that might block the rtnl: */
5373	qeth_clear_ipacmd_list(card);
5374
5375	rtnl_lock();
5376	netif_device_detach(card->dev);
5377	netif_carrier_off(card->dev);
5378	rtnl_unlock();
5379
5380	cancel_work_sync(&card->rx_mode_work);
5381
5382	disc->set_offline(card);
5383
5384	qeth_qdio_clear_card(card, 0);
5385	qeth_drain_output_queues(card);
5386	qeth_clear_working_pool_list(card);
5387	qeth_flush_local_addrs(card);
5388	card->info.promisc_mode = 0;
5389	qeth_default_link_info(card);
5390
5391	rc  = qeth_stop_channel(&card->data);
5392	rc2 = qeth_stop_channel(&card->write);
5393	rc3 = qeth_stop_channel(&card->read);
5394	if (!rc)
5395		rc = (rc2) ? rc2 : rc3;
5396	if (rc)
5397		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
5398	qdio_free(CARD_DDEV(card));
5399
5400	/* let user_space know that device is offline */
5401	kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
5402
5403	mutex_unlock(&card->conf_mutex);
5404	return 0;
5405}
5406EXPORT_SYMBOL_GPL(qeth_set_offline);
5407
5408static int qeth_do_reset(void *data)
5409{
5410	const struct qeth_discipline *disc;
5411	struct qeth_card *card = data;
5412	int rc;
5413
5414	/* Lock-free, other users will block until we are done. */
5415	disc = card->discipline;
5416
5417	QETH_CARD_TEXT(card, 2, "recover1");
5418	if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
5419		return 0;
5420	QETH_CARD_TEXT(card, 2, "recover2");
5421	dev_warn(&card->gdev->dev,
5422		 "A recovery process has been started for the device\n");
5423
5424	qeth_set_offline(card, disc, true);
5425	rc = qeth_set_online(card, disc);
5426	if (!rc) {
5427		dev_info(&card->gdev->dev,
5428			 "Device successfully recovered!\n");
5429	} else {
5430		qeth_set_offline(card, disc, true);
5431		ccwgroup_set_offline(card->gdev, false);
5432		dev_warn(&card->gdev->dev,
5433			 "The qeth device driver failed to recover an error on the device\n");
5434	}
5435	qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
5436	qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
5437	return 0;
5438}
5439
5440#if IS_ENABLED(CONFIG_QETH_L3)
5441static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb,
5442				struct qeth_hdr *hdr)
5443{
5444	struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data;
5445	struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
5446	struct net_device *dev = skb->dev;
5447
5448	if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) {
5449		dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr,
5450				"FAKELL", skb->len);
5451		return;
5452	}
5453
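	/* The device delivered a bare L3 packet; rebuild an Ethernet
	 * header for it ("FAKELL" acts as a dummy link-level address
	 * where no real one is available):
	 */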
5454	if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) {
5455		u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 :
5456							     ETH_P_IP;
5457		unsigned char tg_addr[ETH_ALEN];
5458
5459		skb_reset_network_header(skb);
5460		switch (l3_hdr->flags & QETH_HDR_CAST_MASK) {
5461		case QETH_CAST_MULTICAST:
5462			if (prot == ETH_P_IP)
5463				ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr);
5464			else
5465				ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr);
5466			QETH_CARD_STAT_INC(card, rx_multicast);
5467			break;
5468		case QETH_CAST_BROADCAST:
5469			ether_addr_copy(tg_addr, dev->broadcast);
5470			QETH_CARD_STAT_INC(card, rx_multicast);
5471			break;
5472		default:
5473			if (card->options.sniffer)
5474				skb->pkt_type = PACKET_OTHERHOST;
5475			ether_addr_copy(tg_addr, dev->dev_addr);
5476		}
5477
5478		if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR)
5479			dev_hard_header(skb, dev, prot, tg_addr,
5480					&l3_hdr->next_hop.rx.src_mac, skb->len);
5481		else
5482			dev_hard_header(skb, dev, prot, tg_addr, "FAKELL",
5483					skb->len);
5484	}
5485
5486	/* copy VLAN tag from hdr into skb */
5487	if (!card->options.sniffer &&
5488	    (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME |
5489				  QETH_HDR_EXT_INCLUDE_VLAN_TAG))) {
5490		u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ?
5491				l3_hdr->vlan_id :
5492				l3_hdr->next_hop.rx.vlan_id;
5493
5494		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);
5495	}
5496}
5497#endif
5498
5499static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
5500			     bool uses_frags, bool is_cso)
5501{
5502	struct napi_struct *napi = &card->napi;
5503
5504	if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) {
5505		skb->ip_summed = CHECKSUM_UNNECESSARY;
5506		QETH_CARD_STAT_INC(card, rx_skb_csum);
5507	} else {
5508		skb->ip_summed = CHECKSUM_NONE;
5509	}
5510
5511	QETH_CARD_STAT_ADD(card, rx_bytes, skb->len);
5512	QETH_CARD_STAT_INC(card, rx_packets);
5513	if (skb_is_nonlinear(skb)) {
5514		QETH_CARD_STAT_INC(card, rx_sg_skbs);
5515		QETH_CARD_STAT_ADD(card, rx_sg_frags,
5516				   skb_shinfo(skb)->nr_frags);
5517	}
5518
5519	if (uses_frags) {
5520		napi_gro_frags(napi);
5521	} else {
5522		skb->protocol = eth_type_trans(skb, skb->dev);
5523		napi_gro_receive(napi, skb);
5524	}
5525}
5526
5527static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len)
5528{
5529	struct page *page = virt_to_page(data);
5530	unsigned int next_frag;
5531
5532	next_frag = skb_shinfo(skb)->nr_frags;
5533	get_page(page);
5534	skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len,
5535			data_len);
5536}
5537
5538static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
5539{
5540	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
5541}
5542
5543static int qeth_extract_skb(struct qeth_card *card,
5544			    struct qeth_qdio_buffer *qethbuffer, u8 *element_no,
5545			    int *__offset)
5546{
5547	struct qeth_priv *priv = netdev_priv(card->dev);
5548	struct qdio_buffer *buffer = qethbuffer->buffer;
5549	struct napi_struct *napi = &card->napi;
5550	struct qdio_buffer_element *element;
5551	unsigned int linear_len = 0;
5552	bool uses_frags = false;
5553	int offset = *__offset;
5554	bool use_rx_sg = false;
5555	unsigned int headroom;
5556	struct qeth_hdr *hdr;
5557	struct sk_buff *skb;
5558	int skb_len = 0;
5559	bool is_cso;
5560
5561	element = &buffer->element[*element_no];
5562
5563next_packet:
5564	/* qeth_hdr must not cross element boundaries */
5565	while (element->length < offset + sizeof(struct qeth_hdr)) {
5566		if (qeth_is_last_sbale(element))
5567			return -ENODATA;
5568		element++;
5569		offset = 0;
5570	}
5571
5572	hdr = phys_to_virt(element->addr) + offset;
5573	offset += sizeof(*hdr);
5574	skb = NULL;
5575
5576	switch (hdr->hdr.l2.id) {
5577	case QETH_HEADER_TYPE_LAYER2:
5578		skb_len = hdr->hdr.l2.pkt_length;
5579		is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5580
5581		linear_len = ETH_HLEN;
5582		headroom = 0;
5583		break;
5584	case QETH_HEADER_TYPE_LAYER3:
5585		skb_len = hdr->hdr.l3.length;
5586		is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ;
5587
5588		if (!IS_LAYER3(card)) {
5589			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5590			goto walk_packet;
5591		}
5592
5593		if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) {
5594			linear_len = ETH_HLEN;
5595			headroom = 0;
5596			break;
5597		}
5598
5599		if (hdr->hdr.l3.flags & QETH_HDR_IPV6)
5600			linear_len = sizeof(struct ipv6hdr);
5601		else
5602			linear_len = sizeof(struct iphdr);
5603		headroom = ETH_HLEN;
5604		break;
5605	default:
5606		if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL)
5607			QETH_CARD_STAT_INC(card, rx_frame_errors);
5608		else
5609			QETH_CARD_STAT_INC(card, rx_dropped_notsupp);
5610
5611		/* Can't determine packet length, drop the whole buffer. */
5612		return -EPROTONOSUPPORT;
5613	}
5614
5615	if (skb_len < linear_len) {
5616		QETH_CARD_STAT_INC(card, rx_dropped_runt);
5617		goto walk_packet;
5618	}
5619
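	/* SG receive attaches the buffer pages as frags instead of
	 * copying. It is always used with CQ, and otherwise only for
	 * packets larger than rx_copybreak:
	 */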
5620	use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
5621		    (skb_len > READ_ONCE(priv->rx_copybreak) &&
5622		     !atomic_read(&card->force_alloc_skb));
5623
5624	if (use_rx_sg) {
5625		/* QETH_CQ_ENABLED only: */
5626		if (qethbuffer->rx_skb &&
5627		    skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) {
5628			skb = qethbuffer->rx_skb;
5629			qethbuffer->rx_skb = NULL;
5630			goto use_skb;
5631		}
5632
5633		skb = napi_get_frags(napi);
5634		if (!skb) {
5635			/* -ENOMEM, no point in falling back further. */
5636			QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5637			goto walk_packet;
5638		}
5639
5640		if (skb_tailroom(skb) >= linear_len + headroom) {
5641			uses_frags = true;
5642			goto use_skb;
5643		}
5644
5645		netdev_info_once(card->dev,
5646				 "Insufficient linear space in NAPI frags skb, need %u but have %u\n",
5647				 linear_len + headroom, skb_tailroom(skb));
5648		/* Shouldn't happen. Don't optimize, fall back to linear skb. */
5649	}
5650
5651	linear_len = skb_len;
5652	skb = napi_alloc_skb(napi, linear_len + headroom);
5653	if (!skb) {
5654		QETH_CARD_STAT_INC(card, rx_dropped_nomem);
5655		goto walk_packet;
5656	}
5657
5658use_skb:
5659	if (headroom)
5660		skb_reserve(skb, headroom);
5661walk_packet:
5662	while (skb_len) {
5663		int data_len = min(skb_len, (int)(element->length - offset));
5664		char *data = phys_to_virt(element->addr) + offset;
5665
5666		skb_len -= data_len;
5667		offset += data_len;
5668
5669		/* Extract data from current element: */
5670		if (skb && data_len) {
5671			if (linear_len) {
5672				unsigned int copy_len;
5673
5674				copy_len = min_t(unsigned int, linear_len,
5675						 data_len);
5676
5677				skb_put_data(skb, data, copy_len);
5678				linear_len -= copy_len;
5679				data_len -= copy_len;
5680				data += copy_len;
5681			}
5682
5683			if (data_len)
5684				qeth_create_skb_frag(skb, data, data_len);
5685		}
5686
5687		/* Step forward to next element: */
5688		if (skb_len) {
5689			if (qeth_is_last_sbale(element)) {
5690				QETH_CARD_TEXT(card, 4, "unexeob");
5691				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
5692				if (skb) {
5693					if (uses_frags)
5694						napi_free_frags(napi);
5695					else
5696						kfree_skb(skb);
5697					QETH_CARD_STAT_INC(card,
5698							   rx_length_errors);
5699				}
5700				return -EMSGSIZE;
5701			}
5702			element++;
5703			offset = 0;
5704		}
5705	}
5706
5707	/* This packet was skipped, go get another one: */
5708	if (!skb)
5709		goto next_packet;
5710
5711	*element_no = element - &buffer->element[0];
5712	*__offset = offset;
5713
5714#if IS_ENABLED(CONFIG_QETH_L3)
5715	if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER3)
5716		qeth_l3_rebuild_skb(card, skb, hdr);
5717#endif
5718
5719	qeth_receive_skb(card, skb, uses_frags, is_cso);
5720	return 0;
5721}
5722
5723static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget,
5724				      struct qeth_qdio_buffer *buf, bool *done)
5725{
5726	unsigned int work_done = 0;
5727
5728	while (budget) {
5729		if (qeth_extract_skb(card, buf, &card->rx.buf_element,
5730				     &card->rx.e_offset)) {
5731			*done = true;
5732			break;
5733		}
5734
5735		work_done++;
5736		budget--;
5737	}
5738
5739	return work_done;
5740}
5741
5742static unsigned int qeth_rx_poll(struct qeth_card *card, int budget)
5743{
5744	struct qeth_rx *ctx = &card->rx;
5745	unsigned int work_done = 0;
5746
5747	while (budget > 0) {
5748		struct qeth_qdio_buffer *buffer;
5749		unsigned int skbs_done = 0;
5750		bool done = false;
5751
5752		/* Fetch completed RX buffers: */
5753		if (!card->rx.b_count) {
5754			card->rx.qdio_err = 0;
5755			card->rx.b_count =
5756				qdio_inspect_input_queue(CARD_DDEV(card), 0,
5757							 &card->rx.b_index,
5758							 &card->rx.qdio_err);
5759			if (card->rx.b_count <= 0) {
5760				card->rx.b_count = 0;
5761				break;
5762			}
5763		}
5764
5765		/* Process one completed RX buffer: */
5766		buffer = &card->qdio.in_q->bufs[card->rx.b_index];
5767		if (!(card->rx.qdio_err &&
5768		      qeth_check_qdio_errors(card, buffer->buffer,
5769					     card->rx.qdio_err, "qinerr")))
5770			skbs_done = qeth_extract_skbs(card, budget, buffer,
5771						      &done);
5772		else
5773			done = true;
5774
5775		work_done += skbs_done;
5776		budget -= skbs_done;
5777
5778		if (done) {
5779			QETH_CARD_STAT_INC(card, rx_bufs);
5780			qeth_put_buffer_pool_entry(card, buffer->pool_entry);
5781			buffer->pool_entry = NULL;
5782			card->rx.b_count--;
5783			ctx->bufs_refill++;
5784			ctx->bufs_refill -= qeth_rx_refill_queue(card,
5785								 ctx->bufs_refill);
5786
5787			/* Step forward to next buffer: */
5788			card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1);
5789			card->rx.buf_element = 0;
5790			card->rx.e_offset = 0;
5791		}
5792	}
5793
5794	return work_done;
5795}
5796
5797static void qeth_cq_poll(struct qeth_card *card)
5798{
5799	unsigned int work_done = 0;
5800
5801	while (work_done < QDIO_MAX_BUFFERS_PER_Q) {
5802		unsigned int start, error;
5803		int completed;
5804
5805		completed = qdio_inspect_input_queue(CARD_DDEV(card), 1, &start,
5806						     &error);
5807		if (completed <= 0)
5808			return;
5809
5810		qeth_qdio_cq_handler(card, error, 1, start, completed);
5811		work_done += completed;
5812	}
5813}
5814
5815int qeth_poll(struct napi_struct *napi, int budget)
5816{
5817	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
5818	unsigned int work_done;
5819
5820	work_done = qeth_rx_poll(card, budget);
5821
5822	if (qeth_use_tx_irqs(card)) {
5823		struct qeth_qdio_out_q *queue;
5824		unsigned int i;
5825
5826		qeth_for_each_output_queue(card, queue, i) {
5827			if (!qeth_out_queue_is_empty(queue))
5828				napi_schedule(&queue->napi);
5829		}
5830	}
5831
5832	if (card->options.cq == QETH_CQ_ENABLED)
5833		qeth_cq_poll(card);
5834
5835	if (budget) {
5836		struct qeth_rx *ctx = &card->rx;
5837
5838		/* Process any substantial refill backlog: */
5839		ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill);
5840
5841		/* Exhausted the RX budget. Keep IRQ disabled, we get called again. */
5842		if (work_done >= budget)
5843			return work_done;
5844	}
5845
5846	if (napi_complete_done(napi, work_done) &&
5847	    qdio_start_irq(CARD_DDEV(card)))
5848		napi_schedule(napi);
5849
5850	return work_done;
5851}
5852EXPORT_SYMBOL_GPL(qeth_poll);
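
/*
 * Usage sketch (not part of the driver; assumes a kernel where
 * netif_napi_add() takes three arguments): a poll routine like
 * qeth_poll() is registered once against the net_device, and the core
 * then invokes it from softirq context with a per-call budget. The
 * return value reports how many RX packets were consumed; returning
 * the full budget keeps the instance scheduled with IRQs off.
 *
 *	static void example_register_rx_napi(struct qeth_card *card)
 *	{
 *		netif_napi_add(card->dev, &card->napi, qeth_poll);
 *	}
 *
 * Re-scheduling after napi_complete_done() (as done above when
 * qdio_start_irq() reports further pending work) avoids losing a
 * completion that raced with IRQ re-enablement.
 */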
5853
5854static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
5855				 unsigned int bidx, unsigned int qdio_error,
5856				 int budget)
5857{
5858	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
5859	u8 sflags = buffer->buffer->element[15].sflags;
5860	struct qeth_card *card = queue->card;
5861	bool error = !!qdio_error;
5862
5863	if (qdio_error == QDIO_ERROR_SLSB_PENDING) {
5864		struct qaob *aob = buffer->aob;
5865		struct qeth_qaob_priv1 *priv;
5866		enum iucv_tx_notify notify;
5867
5868		if (!aob) {
5869			netdev_WARN_ONCE(card->dev,
5870					 "Pending TX buffer %#x without QAOB on TX queue %u\n",
5871					 bidx, queue->queue_no);
5872			qeth_schedule_recovery(card);
5873			return;
5874		}
5875
5876		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
5877
5878		priv = (struct qeth_qaob_priv1 *)&aob->user1;
5879		/* QAOB hasn't completed yet: */
5880		if (xchg(&priv->state, QETH_QAOB_PENDING) != QETH_QAOB_DONE) {
5881			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
5882
5883			/* Prepare the queue slot for immediate re-use: */
5884			qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
5885			if (qeth_alloc_out_buf(queue, bidx, GFP_ATOMIC)) {
5886				QETH_CARD_TEXT(card, 2, "outofbuf");
5887				qeth_schedule_recovery(card);
5888			}
5889
5890			list_add(&buffer->list_entry, &queue->pending_bufs);
5891			/* Skip clearing the buffer: */
5892			return;
5893		}
5894
5895		/* QAOB already completed: */
5896		notify = qeth_compute_cq_notification(aob->aorc, 0);
5897		qeth_notify_skbs(queue, buffer, notify);
5898		error = !!aob->aorc;
5899		memset(aob, 0, sizeof(*aob));
5900	} else if (card->options.cq == QETH_CQ_ENABLED) {
5901		qeth_notify_skbs(queue, buffer,
5902				 qeth_compute_cq_notification(sflags, 0));
5903	}
5904
5905	qeth_clear_output_buffer(queue, buffer, error, budget);
5906}
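
/*
 * Sketch of the QAOB handshake above (a summary, using the state names
 * visible in this function): the TX completion path and the CQ handler
 * race on priv->state, and the xchg() decides which side finalizes the
 * buffer.
 *
 *	TX completion wins:  state becomes QETH_QAOB_PENDING, the skbs
 *			     get TX_NOTIFY_PENDING, the buffer is parked
 *			     on queue->pending_bufs and the queue slot is
 *			     re-armed with a freshly allocated buffer.
 *	CQ handler wins:     state already reads QETH_QAOB_DONE, so the
 *			     completion path notifies with the final
 *			     aob->aorc result and clears the buffer
 *			     immediately.
 */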
5907
5908static int qeth_tx_poll(struct napi_struct *napi, int budget)
5909{
5910	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
5911	unsigned int queue_no = queue->queue_no;
5912	struct qeth_card *card = queue->card;
5913	struct net_device *dev = card->dev;
5914	unsigned int work_done = 0;
5915	struct netdev_queue *txq;
5916
5917	if (IS_IQD(card))
5918		txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
5919	else
5920		txq = netdev_get_tx_queue(dev, queue_no);
5921
5922	while (1) {
5923		unsigned int start, error, i;
5924		unsigned int packets = 0;
5925		unsigned int bytes = 0;
5926		int completed;
5927
5928		qeth_tx_complete_pending_bufs(card, queue, false, budget);
5929
5930		if (qeth_out_queue_is_empty(queue)) {
5931			napi_complete(napi);
5932			return 0;
5933		}
5934
5935		/* Give the CPU a breather: */
5936		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
5937			QETH_TXQ_STAT_INC(queue, completion_yield);
5938			if (napi_complete_done(napi, 0))
5939				napi_schedule(napi);
5940			return 0;
5941		}
5942
5943		completed = qdio_inspect_output_queue(CARD_DDEV(card), queue_no,
5944						      &start, &error);
5945		if (completed <= 0) {
5946			/* Ensure we see TX completion for pending work: */
5947			if (napi_complete_done(napi, 0) &&
5948			    !atomic_read(&queue->set_pci_flags_count))
5949				qeth_tx_arm_timer(queue, queue->rescan_usecs);
5950			return 0;
5951		}
5952
5953		for (i = start; i < start + completed; i++) {
5954			struct qeth_qdio_out_buffer *buffer;
5955			unsigned int bidx = QDIO_BUFNR(i);
5956
5957			buffer = queue->bufs[bidx];
5958			packets += buffer->frames;
5959			bytes += buffer->bytes;
5960
5961			qeth_handle_send_error(card, buffer, error);
5962			if (IS_IQD(card))
5963				qeth_iqd_tx_complete(queue, bidx, error, budget);
5964			else
5965				qeth_clear_output_buffer(queue, buffer, error,
5966							 budget);
5967		}
5968
5969		atomic_sub(completed, &queue->used_buffers);
5970		work_done += completed;
5971		if (IS_IQD(card))
5972			netdev_tx_completed_queue(txq, packets, bytes);
5973		else
5974			qeth_check_outbound_queue(queue);
5975
5976		/* xmit may have observed the full-condition, but not yet
5977		 * stopped the txq. In that case the wake-up below won't trigger,
5978		 * so xmit re-checks the txq's fill level before returning and
5979		 * wakes the queue itself if needed.
5980		 */
5981		if (netif_tx_queue_stopped(txq) &&
5982		    !qeth_out_queue_is_full(queue))
5983			netif_tx_wake_queue(txq);
5984	}
5985}
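
/*
 * Sketch of the xmit-side counterpart to the wake-up above
 * (hypothetical and simplified; the real sender lives in the xmit
 * path): after stopping the txq on a full queue, the sender re-checks
 * the fill level so that a completion racing with the stop cannot
 * leave the queue stalled.
 *
 *	netif_tx_stop_queue(txq);
 *	if (!qeth_out_queue_is_full(queue))
 *		netif_tx_wake_queue(txq);
 */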
5986
5987static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5988{
5989	if (!cmd->hdr.return_code)
5990		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5991	return cmd->hdr.return_code;
5992}
5993
5994static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
5995					struct qeth_reply *reply,
5996					unsigned long data)
5997{
5998	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5999	struct qeth_ipa_caps *caps = reply->param;
6000
6001	if (qeth_setassparms_inspect_rc(cmd))
6002		return -EIO;
6003
6004	caps->supported = cmd->data.setassparms.data.caps.supported;
6005	caps->enabled = cmd->data.setassparms.data.caps.enabled;
6006	return 0;
6007}
6008
6009int qeth_setassparms_cb(struct qeth_card *card,
6010			struct qeth_reply *reply, unsigned long data)
6011{
6012	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6013
6014	QETH_CARD_TEXT(card, 4, "defadpcb");
6015
6016	if (cmd->hdr.return_code)
6017		return -EIO;
6018
6019	cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
6020	if (cmd->hdr.prot_version == QETH_PROT_IPV4)
6021		card->options.ipa4.enabled = cmd->hdr.assists.enabled;
6022	if (cmd->hdr.prot_version == QETH_PROT_IPV6)
6023		card->options.ipa6.enabled = cmd->hdr.assists.enabled;
6024	return 0;
6025}
6026EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
6027
6028struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
6029						 enum qeth_ipa_funcs ipa_func,
6030						 u16 cmd_code,
6031						 unsigned int data_length,
6032						 enum qeth_prot_versions prot)
6033{
6034	struct qeth_ipacmd_setassparms *setassparms;
6035	struct qeth_ipacmd_setassparms_hdr *hdr;
6036	struct qeth_cmd_buffer *iob;
6037
6038	QETH_CARD_TEXT(card, 4, "getasscm");
6039	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
6040				 data_length +
6041				 offsetof(struct qeth_ipacmd_setassparms,
6042					  data));
6043	if (!iob)
6044		return NULL;
6045
6046	setassparms = &__ipa_cmd(iob)->data.setassparms;
6047	setassparms->assist_no = ipa_func;
6048
6049	hdr = &setassparms->hdr;
6050	hdr->length = sizeof(*hdr) + data_length;
6051	hdr->command_code = cmd_code;
6052	return iob;
6053}
6054EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
6055
6056int qeth_send_simple_setassparms_prot(struct qeth_card *card,
6057				      enum qeth_ipa_funcs ipa_func,
6058				      u16 cmd_code, u32 *data,
6059				      enum qeth_prot_versions prot)
6060{
6061	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
6062	struct qeth_cmd_buffer *iob;
6063
6064	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
6065	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
6066	if (!iob)
6067		return -ENOMEM;
6068
6069	if (data)
6070		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
6071	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
6072}
6073EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
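
/*
 * Usage sketch (hypothetical caller): starting an assist that needs no
 * extra payload via the simple helper above. The identifiers are the
 * ones used elsewhere in this file; error handling is left to the
 * caller.
 *
 *	static int example_start_rx_csum(struct qeth_card *card)
 *	{
 *		return qeth_send_simple_setassparms_prot(card,
 *						IPA_INBOUND_CHECKSUM,
 *						IPA_CMD_ASS_START, NULL,
 *						QETH_PROT_IPV4);
 *	}
 */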
6074
6075static void qeth_unregister_dbf_views(void)
6076{
6077	int x;
6078
6079	for (x = 0; x < QETH_DBF_INFOS; x++) {
6080		debug_unregister(qeth_dbf[x].id);
6081		qeth_dbf[x].id = NULL;
6082	}
6083}
6084
6085void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
6086{
6087	char dbf_txt_buf[32];
6088	va_list args;
6089
6090	if (!debug_level_enabled(id, level))
6091		return;
6092	va_start(args, fmt);
6093	vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
6094	va_end(args);
6095	debug_text_event(id, level, dbf_txt_buf);
6096}
6097EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
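
/*
 * Usage sketch (assuming a registered debug_info_t, e.g. one of the
 * qeth_dbf[] entries): the formatted message is truncated to the
 * 32-byte stack buffer above before being logged as a text event.
 *
 *	qeth_dbf_longtext(qeth_dbf[QETH_DBF_SETUP].id, 2,
 *			  "rc=%d for device %s", rc, dev_name(dev));
 */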
6098
6099static int qeth_register_dbf_views(void)
6100{
6101	int ret;
6102	int x;
6103
6104	for (x = 0; x < QETH_DBF_INFOS; x++) {
6105		/* register the areas */
6106		qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
6107						qeth_dbf[x].pages,
6108						qeth_dbf[x].areas,
6109						qeth_dbf[x].len);
6110		if (qeth_dbf[x].id == NULL) {
6111			qeth_unregister_dbf_views();
6112			return -ENOMEM;
6113		}
6114
6115		/* register a view */
6116		ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
6117		if (ret) {
6118			qeth_unregister_dbf_views();
6119			return ret;
6120		}
6121
6122		/* set a passing level */
6123		debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
6124	}
6125
6126	return 0;
6127}
6128
6129static DEFINE_MUTEX(qeth_mod_mutex);	/* for synchronized module loading */
6130
6131int qeth_setup_discipline(struct qeth_card *card,
6132			  enum qeth_discipline_id discipline)
6133{
6134	int rc;
6135
6136	mutex_lock(&qeth_mod_mutex);
6137	switch (discipline) {
6138	case QETH_DISCIPLINE_LAYER3:
6139		card->discipline = try_then_request_module(
6140			symbol_get(qeth_l3_discipline), "qeth_l3");
6141		break;
6142	case QETH_DISCIPLINE_LAYER2:
6143		card->discipline = try_then_request_module(
6144			symbol_get(qeth_l2_discipline), "qeth_l2");
6145		break;
6146	default:
6147		break;
6148	}
6149	mutex_unlock(&qeth_mod_mutex);
6150
6151	if (!card->discipline) {
6152		dev_err(&card->gdev->dev,
6153			"There is no kernel module to support discipline %d\n", discipline);
6154		return -EINVAL;
6155	}
6156
6157	rc = card->discipline->setup(card->gdev);
6158	if (rc) {
6159		if (discipline == QETH_DISCIPLINE_LAYER2)
6160			symbol_put(qeth_l2_discipline);
6161		else
6162			symbol_put(qeth_l3_discipline);
6163		card->discipline = NULL;
6164
6165		return rc;
6166	}
6167
6168	card->options.layer = discipline;
6169	return 0;
6170}
6171
6172void qeth_remove_discipline(struct qeth_card *card)
6173{
6174	card->discipline->remove(card->gdev);
6175
6176	if (IS_LAYER2(card))
6177		symbol_put(qeth_l2_discipline);
6178	else
6179		symbol_put(qeth_l3_discipline);
6180	card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
6181	card->discipline = NULL;
6182}
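
/*
 * Note on the module pinning pattern in the two helpers above:
 * try_then_request_module(symbol_get(...), ...) both loads the
 * discipline module on demand and takes a reference on it, so qeth_l2
 * or qeth_l3 cannot be unloaded while a card is bound to the
 * discipline. Each successful qeth_setup_discipline() must therefore
 * be paired with qeth_remove_discipline(), which drops the reference
 * via symbol_put().
 */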
6183
6184static const struct device_type qeth_generic_devtype = {
6185	.name = "qeth_generic",
6186};
6187
6188#define DBF_NAME_LEN	20
6189
6190struct qeth_dbf_entry {
6191	char dbf_name[DBF_NAME_LEN];
6192	debug_info_t *dbf_info;
6193	struct list_head dbf_list;
6194};
6195
6196static LIST_HEAD(qeth_dbf_list);
6197static DEFINE_MUTEX(qeth_dbf_list_mutex);
6198
6199static debug_info_t *qeth_get_dbf_entry(char *name)
6200{
6201	struct qeth_dbf_entry *entry;
6202	debug_info_t *rc = NULL;
6203
6204	mutex_lock(&qeth_dbf_list_mutex);
6205	list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
6206		if (strcmp(entry->dbf_name, name) == 0) {
6207			rc = entry->dbf_info;
6208			break;
6209		}
6210	}
6211	mutex_unlock(&qeth_dbf_list_mutex);
6212	return rc;
6213}
6214
6215static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
6216{
6217	struct qeth_dbf_entry *new_entry;
6218
6219	card->debug = debug_register(name, 2, 1, 8);
6220	if (!card->debug) {
6221		QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
6222		goto err;
6223	}
6224	if (debug_register_view(card->debug, &debug_hex_ascii_view))
6225		goto err_dbg;
6226	new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
6227	if (!new_entry)
6228		goto err_dbg;
6229	strscpy(new_entry->dbf_name, name, sizeof(new_entry->dbf_name));
6230	new_entry->dbf_info = card->debug;
6231	mutex_lock(&qeth_dbf_list_mutex);
6232	list_add(&new_entry->dbf_list, &qeth_dbf_list);
6233	mutex_unlock(&qeth_dbf_list_mutex);
6234
6235	return 0;
6236
6237err_dbg:
6238	debug_unregister(card->debug);
6239err:
6240	return -ENOMEM;
6241}
6242
6243static void qeth_clear_dbf_list(void)
6244{
6245	struct qeth_dbf_entry *entry, *tmp;
6246
6247	mutex_lock(&qeth_dbf_list_mutex);
6248	list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
6249		list_del(&entry->dbf_list);
6250		debug_unregister(entry->dbf_info);
6251		kfree(entry);
6252	}
6253	mutex_unlock(&qeth_dbf_list_mutex);
6254}
6255
6256static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
6257{
6258	struct net_device *dev;
6259	struct qeth_priv *priv;
6260
6261	switch (card->info.type) {
6262	case QETH_CARD_TYPE_IQD:
6263		dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
6264				       ether_setup, QETH_MAX_OUT_QUEUES, 1);
6265		break;
6266	case QETH_CARD_TYPE_OSM:
6267		dev = alloc_etherdev(sizeof(*priv));
6268		break;
6269	default:
6270		dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
6271	}
6272
6273	if (!dev)
6274		return NULL;
6275
6276	priv = netdev_priv(dev);
6277	priv->rx_copybreak = QETH_RX_COPYBREAK;
6278	priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
6279
6280	dev->ml_priv = card;
6281	dev->watchdog_timeo = QETH_TX_TIMEOUT;
6282	dev->min_mtu = 576;
6283	/* initialized when device first goes online: */
6284	dev->max_mtu = 0;
6285	dev->mtu = 0;
6286	SET_NETDEV_DEV(dev, &card->gdev->dev);
6287	netif_carrier_off(dev);
6288
6289	dev->ethtool_ops = &qeth_ethtool_ops;
6290	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
6291	dev->hw_features |= NETIF_F_SG;
6292	dev->vlan_features |= NETIF_F_SG;
6293	if (IS_IQD(card))
6294		dev->features |= NETIF_F_SG;
6295
6296	return dev;
6297}
6298
6299struct net_device *qeth_clone_netdev(struct net_device *orig)
6300{
6301	struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
6302
6303	if (!clone)
6304		return NULL;
6305
6306	clone->dev_port = orig->dev_port;
6307	return clone;
6308}
6309
6310static int qeth_core_probe_device(struct ccwgroup_device *gdev)
6311{
6312	struct qeth_card *card;
6313	struct device *dev;
6314	int rc;
6315	enum qeth_discipline_id enforced_disc;
6316	char dbf_name[DBF_NAME_LEN];
6317
6318	QETH_DBF_TEXT(SETUP, 2, "probedev");
6319
6320	dev = &gdev->dev;
6321	if (!get_device(dev))
6322		return -ENODEV;
6323
6324	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));
6325
6326	card = qeth_alloc_card(gdev);
6327	if (!card) {
6328		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
6329		rc = -ENOMEM;
6330		goto err_dev;
6331	}
6332
6333	scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
6334		  dev_name(&gdev->dev));
6335	card->debug = qeth_get_dbf_entry(dbf_name);
6336	if (!card->debug) {
6337		rc = qeth_add_dbf_entry(card, dbf_name);
6338		if (rc)
6339			goto err_card;
6340	}
6341
6342	qeth_setup_card(card);
6343	card->dev = qeth_alloc_netdev(card);
6344	if (!card->dev) {
6345		rc = -ENOMEM;
6346		goto err_card;
6347	}
6348
6349	qeth_determine_capabilities(card);
6350	qeth_set_blkt_defaults(card);
6351
6352	card->qdio.in_q = qeth_alloc_qdio_queue();
6353	if (!card->qdio.in_q) {
6354		rc = -ENOMEM;
6355		goto err_rx_queue;
6356	}
6357
6358	card->qdio.no_out_queues = card->dev->num_tx_queues;
6359	rc = qeth_update_from_chp_desc(card);
6360	if (rc)
6361		goto err_chp_desc;
6362
6363	gdev->dev.groups = qeth_dev_groups;
6364
6365	enforced_disc = qeth_enforce_discipline(card);
6366	switch (enforced_disc) {
6367	case QETH_DISCIPLINE_UNDETERMINED:
6368		gdev->dev.type = &qeth_generic_devtype;
6369		break;
6370	default:
6371		card->info.layer_enforced = true;
6372		/* It's so early that we don't need the discipline_mutex yet. */
6373		rc = qeth_setup_discipline(card, enforced_disc);
6374		if (rc)
6375			goto err_setup_disc;
6376
6377		break;
6378	}
6379
6380	return 0;
6381
6382err_setup_disc:
6383err_chp_desc:
6384	qeth_free_qdio_queue(card->qdio.in_q);
6385err_rx_queue:
6386	free_netdev(card->dev);
6387err_card:
6388	qeth_core_free_card(card);
6389err_dev:
6390	put_device(dev);
6391	return rc;
6392}
6393
6394static void qeth_core_remove_device(struct ccwgroup_device *gdev)
6395{
6396	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6397
6398	QETH_CARD_TEXT(card, 2, "removedv");
6399
6400	mutex_lock(&card->discipline_mutex);
6401	if (card->discipline)
6402		qeth_remove_discipline(card);
6403	mutex_unlock(&card->discipline_mutex);
6404
6405	qeth_free_qdio_queues(card);
6406
6407	qeth_free_qdio_queue(card->qdio.in_q);
6408	free_netdev(card->dev);
6409	qeth_core_free_card(card);
6410	put_device(&gdev->dev);
6411}
6412
6413static int qeth_core_set_online(struct ccwgroup_device *gdev)
6414{
6415	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6416	int rc = 0;
6417	enum qeth_discipline_id def_discipline;
6418
6419	mutex_lock(&card->discipline_mutex);
6420	if (!card->discipline) {
6421		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
6422						QETH_DISCIPLINE_LAYER2;
6423		rc = qeth_setup_discipline(card, def_discipline);
6424		if (rc)
6425			goto err;
6426	}
6427
6428	rc = qeth_set_online(card, card->discipline);
6429
6430err:
6431	mutex_unlock(&card->discipline_mutex);
6432	return rc;
6433}
6434
6435static int qeth_core_set_offline(struct ccwgroup_device *gdev)
6436{
6437	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6438	int rc;
6439
6440	mutex_lock(&card->discipline_mutex);
6441	rc = qeth_set_offline(card, card->discipline, false);
6442	mutex_unlock(&card->discipline_mutex);
6443
6444	return rc;
6445}
6446
6447static void qeth_core_shutdown(struct ccwgroup_device *gdev)
6448{
6449	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
6450
6451	qeth_set_allowed_threads(card, 0, 1);
6452	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
6453		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
6454	qeth_qdio_clear_card(card, 0);
6455	qeth_drain_output_queues(card);
6456	qdio_free(CARD_DDEV(card));
6457}
6458
6459static ssize_t group_store(struct device_driver *ddrv, const char *buf,
6460			   size_t count)
6461{
6462	int err;
6463
6464	err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
6465				  buf);
6466
6467	return err ? err : count;
6468}
6469static DRIVER_ATTR_WO(group);
6470
6471static struct attribute *qeth_drv_attrs[] = {
6472	&driver_attr_group.attr,
6473	NULL,
6474};
6475static struct attribute_group qeth_drv_attr_group = {
6476	.attrs = qeth_drv_attrs,
6477};
6478static const struct attribute_group *qeth_drv_attr_groups[] = {
6479	&qeth_drv_attr_group,
6480	NULL,
6481};
6482
6483static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
6484	.driver = {
6485		.groups = qeth_drv_attr_groups,
6486		.owner = THIS_MODULE,
6487		.name = "qeth",
6488	},
6489	.ccw_driver = &qeth_ccw_driver,
6490	.setup = qeth_core_probe_device,
6491	.remove = qeth_core_remove_device,
6492	.set_online = qeth_core_set_online,
6493	.set_offline = qeth_core_set_offline,
6494	.shutdown = qeth_core_shutdown,
6495};
6496
6497int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
6498{
6499	struct qeth_card *card = dev->ml_priv;
6500	int rc = 0;
6501
6502	switch (cmd) {
6503	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
6504		rc = qeth_snmp_command(card, data);
6505		break;
6506	case SIOC_QETH_GET_CARD_TYPE:
6507		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
6508		    !IS_VM_NIC(card))
6509			return 1;
6510		return 0;
6511	case SIOC_QETH_QUERY_OAT:
6512		rc = qeth_query_oat_command(card, data);
6513		break;
6514	default:
6515		rc = -EOPNOTSUPP;
6516	}
6517	if (rc)
6518		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6519	return rc;
6520}
6521EXPORT_SYMBOL_GPL(qeth_siocdevprivate);
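
/*
 * Userspace sketch (a hypothetical program, not driver code; the
 * interface name is made up): invoking the private ioctl handled
 * above. SIOC_QETH_GET_CARD_TYPE returns 1 for a real OSA/OSM/OSX
 * adapter and 0 otherwise.
 *
 *	struct ifreq ifr = {};
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "encf500", IFNAMSIZ - 1);
 *	if (ioctl(fd, SIOC_QETH_GET_CARD_TYPE, &ifr) == 1)
 *		printf("real adapter\n");
 */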
6522
6523int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6524{
6525	struct qeth_card *card = dev->ml_priv;
6526	struct mii_ioctl_data *mii_data;
6527	int rc = 0;
6528
6529	switch (cmd) {
6530	case SIOCGMIIPHY:
6531		mii_data = if_mii(rq);
6532		mii_data->phy_id = 0;
6533		break;
6534	case SIOCGMIIREG:
6535		mii_data = if_mii(rq);
6536		if (mii_data->phy_id != 0)
6537			rc = -EINVAL;
6538		else
6539			mii_data->val_out = qeth_mdio_read(dev,
6540				mii_data->phy_id, mii_data->reg_num);
6541		break;
6542	default:
6543		return -EOPNOTSUPP;
6544	}
6545	if (rc)
6546		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
6547	return rc;
6548}
6549EXPORT_SYMBOL_GPL(qeth_do_ioctl);
6550
6551static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
6552			      unsigned long data)
6553{
6554	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6555	u32 *features = reply->param;
6556
6557	if (qeth_setassparms_inspect_rc(cmd))
6558		return -EIO;
6559
6560	*features = cmd->data.setassparms.data.flags_32bit;
6561	return 0;
6562}
6563
6564static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6565			     enum qeth_prot_versions prot)
6566{
6567	return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
6568						 NULL, prot);
6569}
6570
6571static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
6572			    enum qeth_prot_versions prot, u8 *lp2lp)
6573{
6574	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
6575	struct qeth_cmd_buffer *iob;
6576	struct qeth_ipa_caps caps;
6577	u32 features;
6578	int rc;
6579
6580	/* some L3 HW requires combined L3+L4 csum offload: */
6581	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
6582	    cstype == IPA_OUTBOUND_CHECKSUM)
6583		required_features |= QETH_IPA_CHECKSUM_IP_HDR;
6584
6585	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
6586				       prot);
6587	if (!iob)
6588		return -ENOMEM;
6589
6590	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
6591	if (rc)
6592		return rc;
6593
6594	if ((required_features & features) != required_features) {
6595		qeth_set_csum_off(card, cstype, prot);
6596		return -EOPNOTSUPP;
6597	}
6598
6599	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
6600				       SETASS_DATA_SIZEOF(flags_32bit),
6601				       prot);
6602	if (!iob) {
6603		qeth_set_csum_off(card, cstype, prot);
6604		return -ENOMEM;
6605	}
6606
6607	if (features & QETH_IPA_CHECKSUM_LP2LP)
6608		required_features |= QETH_IPA_CHECKSUM_LP2LP;
6609	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
6610	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6611	if (rc) {
6612		qeth_set_csum_off(card, cstype, prot);
6613		return rc;
6614	}
6615
6616	if (!qeth_ipa_caps_supported(&caps, required_features) ||
6617	    !qeth_ipa_caps_enabled(&caps, required_features)) {
6618		qeth_set_csum_off(card, cstype, prot);
6619		return -EOPNOTSUPP;
6620	}
6621
6622	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
6623		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
6624
6625	if (lp2lp)
6626		*lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP);
6627
6628	return 0;
6629}
6630
6631static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6632			     enum qeth_prot_versions prot, u8 *lp2lp)
6633{
6634	return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) :
6635		    qeth_set_csum_off(card, cstype, prot);
6636}
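
/*
 * Usage sketch (mirrors the call sites in qeth_set_features() below):
 * enabling TX checksum offload for IPv4 and recording whether the card
 * can also checksum local-port-to-local-port traffic.
 *
 *	rc = qeth_set_ipa_csum(card, true, IPA_OUTBOUND_CHECKSUM,
 *			       QETH_PROT_IPV4,
 *			       &card->info.has_lp2lp_cso_v4);
 */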
6637
6638static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6639			     unsigned long data)
6640{
6641	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6642	struct qeth_tso_start_data *tso_data = reply->param;
6643
6644	if (qeth_setassparms_inspect_rc(cmd))
6645		return -EIO;
6646
6647	tso_data->mss = cmd->data.setassparms.data.tso.mss;
6648	tso_data->supported = cmd->data.setassparms.data.tso.supported;
6649	return 0;
6650}
6651
6652static int qeth_set_tso_off(struct qeth_card *card,
6653			    enum qeth_prot_versions prot)
6654{
6655	return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6656						 IPA_CMD_ASS_STOP, NULL, prot);
6657}
6658
6659static int qeth_set_tso_on(struct qeth_card *card,
6660			   enum qeth_prot_versions prot)
6661{
6662	struct qeth_tso_start_data tso_data;
6663	struct qeth_cmd_buffer *iob;
6664	struct qeth_ipa_caps caps;
6665	int rc;
6666
6667	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6668				       IPA_CMD_ASS_START, 0, prot);
6669	if (!iob)
6670		return -ENOMEM;
6671
6672	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
6673	if (rc)
6674		return rc;
6675
6676	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
6677		qeth_set_tso_off(card, prot);
6678		return -EOPNOTSUPP;
6679	}
6680
6681	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
6682				       IPA_CMD_ASS_ENABLE,
6683				       SETASS_DATA_SIZEOF(caps), prot);
6684	if (!iob) {
6685		qeth_set_tso_off(card, prot);
6686		return -ENOMEM;
6687	}
6688
6689	/* enable TSO capability */
6690	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
6691		QETH_IPA_LARGE_SEND_TCP;
6692	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
6693	if (rc) {
6694		qeth_set_tso_off(card, prot);
6695		return rc;
6696	}
6697
6698	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
6699	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
6700		qeth_set_tso_off(card, prot);
6701		return -EOPNOTSUPP;
6702	}
6703
6704	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
6705		 tso_data.mss);
6706	return 0;
6707}
6708
6709static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6710			    enum qeth_prot_versions prot)
6711{
6712	return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6713}
6714
6715static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
6716{
6717	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
6718	int rc_ipv6;
6719
6720	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
6721		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6722					    QETH_PROT_IPV4, NULL);
6723	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6724		/* the IPv6 assist is unavailable, so rc_ipv4 alone decides */
6725		return rc_ipv4;
6726
6727	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
6728				    QETH_PROT_IPV6, NULL);
6729
6730	if (on)
6731		/* enable: success if any Assist is active */
6732		return (rc_ipv6) ? rc_ipv4 : 0;
6733
6734	/* disable: failure if any Assist is still active */
6735	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
6736}
6737
6738/**
6739 * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6740 * @dev:	a net_device
6741 */
6742void qeth_enable_hw_features(struct net_device *dev)
6743{
6744	struct qeth_card *card = dev->ml_priv;
6745	netdev_features_t features;
6746
6747	features = dev->features;
6748	/* force-off any feature that might need an IPA sequence.
6749	 * netdev_update_features() will restart them.
6750	 */
6751	dev->features &= ~dev->hw_features;
6752	/* toggle VLAN filter, so that VIDs are re-programmed: */
6753	if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6754		dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6755		dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6756	}
6757	netdev_update_features(dev);
6758	if (features != dev->features)
6759		dev_warn(&card->gdev->dev,
6760			 "Device recovery failed to restore all offload features\n");
6761}
6762EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6763
6764static void qeth_check_restricted_features(struct qeth_card *card,
6765					   netdev_features_t changed,
6766					   netdev_features_t actual)
6767{
6768	netdev_features_t ipv6_features = NETIF_F_TSO6;
6769	netdev_features_t ipv4_features = NETIF_F_TSO;
6770
6771	if (!card->info.has_lp2lp_cso_v6)
6772		ipv6_features |= NETIF_F_IPV6_CSUM;
6773	if (!card->info.has_lp2lp_cso_v4)
6774		ipv4_features |= NETIF_F_IP_CSUM;
6775
6776	if ((changed & ipv6_features) && !(actual & ipv6_features))
6777		qeth_flush_local_addrs6(card);
6778	if ((changed & ipv4_features) && !(actual & ipv4_features))
6779		qeth_flush_local_addrs4(card);
6780}
6781
6782int qeth_set_features(struct net_device *dev, netdev_features_t features)
6783{
6784	struct qeth_card *card = dev->ml_priv;
6785	netdev_features_t changed = dev->features ^ features;
6786	int rc = 0;
6787
6788	QETH_CARD_TEXT(card, 2, "setfeat");
6789	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6790
6791	if ((changed & NETIF_F_IP_CSUM)) {
6792		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
6793				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4,
6794				       &card->info.has_lp2lp_cso_v4);
6795		if (rc)
6796			changed ^= NETIF_F_IP_CSUM;
6797	}
6798	if (changed & NETIF_F_IPV6_CSUM) {
6799		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
6800				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6,
6801				       &card->info.has_lp2lp_cso_v6);
6802		if (rc)
6803			changed ^= NETIF_F_IPV6_CSUM;
6804	}
6805	if (changed & NETIF_F_RXCSUM) {
6806		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
6807		if (rc)
6808			changed ^= NETIF_F_RXCSUM;
6809	}
6810	if (changed & NETIF_F_TSO) {
6811		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
6812				      QETH_PROT_IPV4);
6813		if (rc)
6814			changed ^= NETIF_F_TSO;
6815	}
6816	if (changed & NETIF_F_TSO6) {
6817		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
6818				      QETH_PROT_IPV6);
6819		if (rc)
6820			changed ^= NETIF_F_TSO6;
6821	}
6822
6823	qeth_check_restricted_features(card, dev->features ^ features,
6824				       dev->features ^ changed);
6825
6826	/* everything changed successfully? */
6827	if ((dev->features ^ features) == changed)
6828		return 0;
6829	/* something went wrong. save changed features and return error */
6830	dev->features ^= changed;
6831	return -EIO;
6832}
6833EXPORT_SYMBOL_GPL(qeth_set_features);
6834
6835netdev_features_t qeth_fix_features(struct net_device *dev,
6836				    netdev_features_t features)
6837{
6838	struct qeth_card *card = dev->ml_priv;
6839
6840	QETH_CARD_TEXT(card, 2, "fixfeat");
6841	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
6842		features &= ~NETIF_F_IP_CSUM;
6843	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
6844		features &= ~NETIF_F_IPV6_CSUM;
6845	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
6846	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
6847		features &= ~NETIF_F_RXCSUM;
6848	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
6849		features &= ~NETIF_F_TSO;
6850	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
6851		features &= ~NETIF_F_TSO6;
6852
6853	QETH_CARD_HEX(card, 2, &features, sizeof(features));
6854	return features;
6855}
6856EXPORT_SYMBOL_GPL(qeth_fix_features);
6857
6858netdev_features_t qeth_features_check(struct sk_buff *skb,
6859				      struct net_device *dev,
6860				      netdev_features_t features)
6861{
6862	struct qeth_card *card = dev->ml_priv;
6863
6864	/* Traffic with local next-hop is not eligible for some offloads: */
6865	if (skb->ip_summed == CHECKSUM_PARTIAL &&
6866	    READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) {
6867		netdev_features_t restricted = 0;
6868
6869		if (skb_is_gso(skb) && !netif_needs_gso(skb, features))
6870			restricted |= NETIF_F_ALL_TSO;
6871
6872		switch (vlan_get_protocol(skb)) {
6873		case htons(ETH_P_IP):
6874			if (!card->info.has_lp2lp_cso_v4)
6875				restricted |= NETIF_F_IP_CSUM;
6876
6877			if (restricted && qeth_next_hop_is_local_v4(card, skb))
6878				features &= ~restricted;
6879			break;
6880		case htons(ETH_P_IPV6):
6881			if (!card->info.has_lp2lp_cso_v6)
6882				restricted |= NETIF_F_IPV6_CSUM;
6883
6884			if (restricted && qeth_next_hop_is_local_v6(card, skb))
6885				features &= ~restricted;
6886			break;
6887		default:
6888			break;
6889		}
6890	}
6891
6892	/* GSO segmentation builds skbs with
6893	 *	a (small) linear part for the headers, and
6894	 *	page frags for the data.
6895	 * Compared to a linear skb, the header-only part consumes an
6896	 * additional buffer element. This reduces buffer utilization, and
6897	 * hurts throughput. So compress small segments into one element.
6898	 */
6899	if (netif_needs_gso(skb, features)) {
6900		/* match skb_segment(): */
6901		unsigned int doffset = skb->data - skb_mac_header(skb);
6902		unsigned int hsize = skb_shinfo(skb)->gso_size;
6903		unsigned int hroom = skb_headroom(skb);
6904
6905		/* linearize only if resulting skb allocations are order-0: */
6906		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
6907			features &= ~NETIF_F_SG;
6908	}
6909
6910	return vlan_features_check(skb, features);
6911}
6912EXPORT_SYMBOL_GPL(qeth_features_check);
6913
6914void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6915{
6916	struct qeth_card *card = dev->ml_priv;
6917	struct qeth_qdio_out_q *queue;
6918	unsigned int i;
6919
6920	QETH_CARD_TEXT(card, 5, "getstat");
6921
6922	stats->rx_packets = card->stats.rx_packets;
6923	stats->rx_bytes = card->stats.rx_bytes;
6924	stats->rx_errors = card->stats.rx_length_errors +
6925			   card->stats.rx_frame_errors +
6926			   card->stats.rx_fifo_errors;
6927	stats->rx_dropped = card->stats.rx_dropped_nomem +
6928			    card->stats.rx_dropped_notsupp +
6929			    card->stats.rx_dropped_runt;
6930	stats->multicast = card->stats.rx_multicast;
6931	stats->rx_length_errors = card->stats.rx_length_errors;
6932	stats->rx_frame_errors = card->stats.rx_frame_errors;
6933	stats->rx_fifo_errors = card->stats.rx_fifo_errors;
6934
6935	for (i = 0; i < card->qdio.no_out_queues; i++) {
6936		queue = card->qdio.out_qs[i];
6937
6938		stats->tx_packets += queue->stats.tx_packets;
6939		stats->tx_bytes += queue->stats.tx_bytes;
6940		stats->tx_errors += queue->stats.tx_errors;
6941		stats->tx_dropped += queue->stats.tx_dropped;
6942	}
6943}
6944EXPORT_SYMBOL_GPL(qeth_get_stats64);
6945
6946#define TC_IQD_UCAST   0
6947static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
6948				     unsigned int ucast_txqs)
6949{
6950	unsigned int prio;
6951
6952	/* IQD requires mcast traffic to be placed on a dedicated queue, and
6953	 * qeth_iqd_select_queue() deals with this.
6954	 * For unicast traffic, we defer the queue selection to the stack.
6955	 * By installing a trivial prio map that spans over only the unicast
6956	 * queues, we can encourage the stack to spread the ucast traffic evenly
6957	 * without selecting the mcast queue.
6958	 */
6959
6960	/* One traffic class, spanning over all active ucast queues: */
6961	netdev_set_num_tc(dev, 1);
6962	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
6963			    QETH_IQD_MIN_UCAST_TXQ);
6964
6965	/* Map all priorities to this traffic class: */
6966	for (prio = 0; prio <= TC_BITMASK; prio++)
6967		netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST);
6968}
6969
6970int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
6971{
6972	struct net_device *dev = card->dev;
6973	int rc;
6974
6975	/* Per netif_setup_tc(), adjust the mapping first: */
6976	if (IS_IQD(card))
6977		qeth_iqd_set_prio_tc_map(dev, count - 1);
6978
6979	rc = netif_set_real_num_tx_queues(dev, count);
6980
6981	if (rc && IS_IQD(card))
6982		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
6983
6984	return rc;
6985}
6986EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
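
/*
 * Usage sketch (assuming the usual IQD layout, where the mcast queue
 * is queue 0): with four active TX queues, the prio map installed
 * above spans the single traffic class over the QETH_IQD_MIN_UCAST_TXQ
 * queues 1-3, so the stack never picks the dedicated mcast queue for
 * unicast traffic.
 *
 *	rc = qeth_set_real_num_tx_queues(card, 4);
 */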
6987
6988u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6989			  u8 cast_type, struct net_device *sb_dev)
6990{
6991	u16 txq;
6992
6993	if (cast_type != RTN_UNICAST)
6994		return QETH_IQD_MCAST_TXQ;
6995	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
6996		return QETH_IQD_MIN_UCAST_TXQ;
6997
6998	txq = netdev_pick_tx(dev, skb, sb_dev);
6999	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
7000}
7001EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
7002
7003u16 qeth_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
7004			  struct net_device *sb_dev)
7005{
7006	struct qeth_card *card = dev->ml_priv;
7007
7008	if (qeth_uses_tx_prio_queueing(card))
7009		return qeth_get_priority_queue(card, skb);
7010
7011	return netdev_pick_tx(dev, skb, sb_dev);
7012}
7013EXPORT_SYMBOL_GPL(qeth_osa_select_queue);
7014
7015int qeth_open(struct net_device *dev)
7016{
7017	struct qeth_card *card = dev->ml_priv;
7018	struct qeth_qdio_out_q *queue;
7019	unsigned int i;
7020
7021	QETH_CARD_TEXT(card, 4, "qethopen");
7022
7023	card->data.state = CH_STATE_UP;
7024	netif_tx_start_all_queues(dev);
7025
7026	local_bh_disable();
7027	qeth_for_each_output_queue(card, queue, i) {
7028		netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
7029		napi_enable(&queue->napi);
7030		napi_schedule(&queue->napi);
7031	}
7032
7033	napi_enable(&card->napi);
7034	napi_schedule(&card->napi);
7035	/* kick-start the NAPI softirq: */
7036	local_bh_enable();
7037
7038	return 0;
7039}
7040EXPORT_SYMBOL_GPL(qeth_open);
7041
7042int qeth_stop(struct net_device *dev)
7043{
7044	struct qeth_card *card = dev->ml_priv;
7045	struct qeth_qdio_out_q *queue;
7046	unsigned int i;
7047
7048	QETH_CARD_TEXT(card, 4, "qethstop");
7049
7050	napi_disable(&card->napi);
7051	cancel_delayed_work_sync(&card->buffer_reclaim_work);
7052	qdio_stop_irq(CARD_DDEV(card));
7053
7054	/* Quiesce the NAPI instances: */
7055	qeth_for_each_output_queue(card, queue, i)
7056		napi_disable(&queue->napi);
7057
7058	/* Stop .ndo_start_xmit, might still access queue->napi. */
7059	netif_tx_disable(dev);
7060
7061	qeth_for_each_output_queue(card, queue, i) {
7062		del_timer_sync(&queue->timer);
7063		/* Queues may get re-allocated, so remove the NAPIs. */
7064		netif_napi_del(&queue->napi);
7065	}
7066
7067	return 0;
7068}
7069EXPORT_SYMBOL_GPL(qeth_stop);
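
/*
 * Wiring sketch (assumed, modeled on how the qeth_l2/l3 disciplines
 * consume these exports): the callbacks above slot straight into a
 * net_device_ops instance. qeth_iqd_select_queue() takes an extra
 * cast_type argument and is wrapped by the disciplines instead of
 * being used directly.
 *
 *	static const struct net_device_ops example_netdev_ops = {
 *		.ndo_open		= qeth_open,
 *		.ndo_stop		= qeth_stop,
 *		.ndo_get_stats64	= qeth_get_stats64,
 *		.ndo_select_queue	= qeth_osa_select_queue,
 *		.ndo_fix_features	= qeth_fix_features,
 *		.ndo_set_features	= qeth_set_features,
 *		.ndo_features_check	= qeth_features_check,
 *	};
 */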
7070
7071static int __init qeth_core_init(void)
7072{
7073	int rc;
7074
7075	pr_info("loading core functions\n");
7076
7077	qeth_debugfs_root = debugfs_create_dir("qeth", NULL);
7078
7079	rc = qeth_register_dbf_views();
7080	if (rc)
7081		goto dbf_err;
7082	qeth_core_root_dev = root_device_register("qeth");
7083	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
7084	if (rc)
7085		goto register_err;
7086	qeth_core_header_cache =
7087		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
7088				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
7089				  0, NULL);
7090	if (!qeth_core_header_cache) {
7091		rc = -ENOMEM;
7092		goto slab_err;
7093	}
7094	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
7095			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
7096	if (!qeth_qdio_outbuf_cache) {
7097		rc = -ENOMEM;
7098		goto cqslab_err;
7099	}
7100
7101	qeth_qaob_cache = kmem_cache_create("qeth_qaob",
7102					    sizeof(struct qaob),
7103					    sizeof(struct qaob),
7104					    0, NULL);
7105	if (!qeth_qaob_cache) {
7106		rc = -ENOMEM;
7107		goto qaob_err;
7108	}
7109
7110	rc = ccw_driver_register(&qeth_ccw_driver);
7111	if (rc)
7112		goto ccw_err;
7113	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
7114	if (rc)
7115		goto ccwgroup_err;
7116
7117	return 0;
7118
7119ccwgroup_err:
7120	ccw_driver_unregister(&qeth_ccw_driver);
7121ccw_err:
7122	kmem_cache_destroy(qeth_qaob_cache);
7123qaob_err:
7124	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7125cqslab_err:
7126	kmem_cache_destroy(qeth_core_header_cache);
7127slab_err:
7128	root_device_unregister(qeth_core_root_dev);
7129register_err:
7130	qeth_unregister_dbf_views();
7131dbf_err:
7132	debugfs_remove_recursive(qeth_debugfs_root);
7133	pr_err("Initializing the qeth device driver failed\n");
7134	return rc;
7135}
7136
7137static void __exit qeth_core_exit(void)
7138{
7139	qeth_clear_dbf_list();
7140	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
7141	ccw_driver_unregister(&qeth_ccw_driver);
7142	kmem_cache_destroy(qeth_qaob_cache);
7143	kmem_cache_destroy(qeth_qdio_outbuf_cache);
7144	kmem_cache_destroy(qeth_core_header_cache);
7145	root_device_unregister(qeth_core_root_dev);
7146	qeth_unregister_dbf_views();
7147	debugfs_remove_recursive(qeth_debugfs_root);
7148	pr_info("core functions removed\n");
7149}
7150
7151module_init(qeth_core_init);
7152module_exit(qeth_core_exit);
7153MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
7154MODULE_DESCRIPTION("qeth core functions");
7155MODULE_LICENSE("GPL");