v3.1
   1/*
   2 * linux/drivers/s390/cio/qdio_main.c
   3 *
   4 * Linux for s390 qdio support, buffer handling, qdio API and module support.
   5 *
   6 * Copyright 2000,2008 IBM Corp.
   7 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
   8 *	      Jan Glauber <jang@linux.vnet.ibm.com>
   9 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
  10 */
  11#include <linux/module.h>
  12#include <linux/init.h>
  13#include <linux/kernel.h>
  14#include <linux/timer.h>
  15#include <linux/delay.h>
  16#include <linux/gfp.h>
  17#include <linux/kernel_stat.h>
  18#include <linux/atomic.h>
  19#include <asm/debug.h>
  20#include <asm/qdio.h>
  21
  22#include "cio.h"
  23#include "css.h"
  24#include "device.h"
  25#include "qdio.h"
  26#include "qdio_debug.h"
  27
  28MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
  29	"Jan Glauber <jang@linux.vnet.ibm.com>");
  30MODULE_DESCRIPTION("QDIO base support");
  31MODULE_LICENSE("GPL");
  32
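/*
 * Note: the do_siga_* wrappers below issue the s390 SIGNAL ADAPTER (SIGA)
 * instruction. The function code, subchannel id/token and queue masks are
 * passed in fixed registers 0-3 (hence the asm("0")..asm("3") register
 * variables), and the "ipm"/"srl %0,28" pair copies the resulting condition
 * code (0-3) into cc for the caller.
 */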
  33static inline int do_siga_sync(unsigned long schid,
  34			       unsigned int out_mask, unsigned int in_mask,
  35			       unsigned int fc)
  36{
  37	register unsigned long __fc asm ("0") = fc;
  38	register unsigned long __schid asm ("1") = schid;
  39	register unsigned long out asm ("2") = out_mask;
  40	register unsigned long in asm ("3") = in_mask;
  41	int cc;
  42
  43	asm volatile(
  44		"	siga	0\n"
  45		"	ipm	%0\n"
  46		"	srl	%0,28\n"
  47		: "=d" (cc)
  48		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
  49	return cc;
  50}
  51
  52static inline int do_siga_input(unsigned long schid, unsigned int mask,
  53				unsigned int fc)
  54{
  55	register unsigned long __fc asm ("0") = fc;
  56	register unsigned long __schid asm ("1") = schid;
  57	register unsigned long __mask asm ("2") = mask;
  58	int cc;
  59
  60	asm volatile(
  61		"	siga	0\n"
  62		"	ipm	%0\n"
  63		"	srl	%0,28\n"
  64		: "=d" (cc)
  65		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
  66	return cc;
  67}
  68
  69/**
  70 * do_siga_output - perform SIGA-w/wt function
  71 * @schid: subchannel id or in case of QEBSM the subchannel token
  72 * @mask: which output queues to process
  73 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
  74 * @fc: function code to perform
  75 *
  76 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
  77 * Note: For IQDC unicast queues only the highest priority queue is processed.
  78 */
  79static inline int do_siga_output(unsigned long schid, unsigned long mask,
  80				 unsigned int *bb, unsigned int fc)
  81{
  82	register unsigned long __fc asm("0") = fc;
  83	register unsigned long __schid asm("1") = schid;
  84	register unsigned long __mask asm("2") = mask;
  85	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;
  86
  87	asm volatile(
  88		"	siga	0\n"
  89		"0:	ipm	%0\n"
  90		"	srl	%0,28\n"
  91		"1:\n"
  92		EX_TABLE(0b, 1b)
  93		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
  94		: : "cc", "memory");
  95	*bb = ((unsigned int) __fc) >> 31;
  96	return cc;
  97}
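/*
 * Note: cc is preloaded with QDIO_ERROR_SIGA_ACCESS_EXCEPTION and the
 * EX_TABLE fixup branches past the ipm/srl sequence, so if the SIGA-w/wt
 * takes an access exception the caller sees that error value instead of a
 * real condition code (see the "Returns" line in the kerneldoc above).
 */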
  98
  99static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 100{
 101	/* all done or next buffer state different */
 102	if (ccq == 0 || ccq == 32)
 103		return 0;
 104	/* not all buffers processed */
 105	if (ccq == 96 || ccq == 97)
 106		return 1;
 107	/* notify devices immediately */
 108	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
 109	return -EIO;
 110}
 111
 112/**
 113 * qdio_do_eqbs - extract buffer states for QEBSM
 114 * @q: queue to manipulate
 115 * @state: state of the extracted buffers
 116 * @start: buffer number to start at
 117 * @count: count of buffers to examine
 118 * @auto_ack: automatically acknowledge buffers
 119 *
 120 * Returns the number of successfully extracted equal buffer states.
  121 * Stops processing if a state is different from the last buffer's state.
 122 */
 123static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 124			int start, int count, int auto_ack)
 125{
 126	unsigned int ccq = 0;
 127	int tmp_count = count, tmp_start = start;
 128	int nr = q->nr;
 129	int rc;
 130
 131	BUG_ON(!q->irq_ptr->sch_token);
 132	qperf_inc(q, eqbs);
 133
 134	if (!q->is_input_q)
 135		nr += q->irq_ptr->nr_input_qs;
 136again:
 137	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
 138		      auto_ack);
 139	rc = qdio_check_ccq(q, ccq);
 140
 141	/* At least one buffer was processed, return and extract the remaining
 142	 * buffers later.
 143	 */
 144	if ((ccq == 96) && (count != tmp_count)) {
 145		qperf_inc(q, eqbs_partial);
 146		return (count - tmp_count);
 147	}
 148
 149	if (rc == 1) {
 150		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
 151		goto again;
 152	}
 153
 154	if (rc < 0) {
 155		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
 156		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 157		q->handler(q->irq_ptr->cdev,
 158			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
 159			   0, -1, -1, q->irq_ptr->int_parm);
 160		return 0;
 161	}
 162	return count - tmp_count;
 163}
 164
 165/**
 166 * qdio_do_sqbs - set buffer states for QEBSM
 167 * @q: queue to manipulate
 168 * @state: new state of the buffers
 169 * @start: first buffer number to change
 170 * @count: how many buffers to change
 171 *
 172 * Returns the number of successfully changed buffers.
  173 * Retries until the specified count of buffer states is set or an
 174 * error occurs.
 175 */
 176static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
 177			int count)
 178{
 179	unsigned int ccq = 0;
 180	int tmp_count = count, tmp_start = start;
 181	int nr = q->nr;
 182	int rc;
 183
 184	if (!count)
 185		return 0;
 186
 187	BUG_ON(!q->irq_ptr->sch_token);
 188	qperf_inc(q, sqbs);
 189
 190	if (!q->is_input_q)
 191		nr += q->irq_ptr->nr_input_qs;
 192again:
 193	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
 194	rc = qdio_check_ccq(q, ccq);
 195	if (rc == 1) {
 196		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
 197		qperf_inc(q, sqbs_partial);
 198		goto again;
 199	}
 200	if (rc < 0) {
 201		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
 202		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 203		q->handler(q->irq_ptr->cdev,
 204			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
 205			   0, -1, -1, q->irq_ptr->int_parm);
 206		return 0;
 207	}
 208	WARN_ON(tmp_count);
 209	return count - tmp_count;
 210}
 211
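/*
 * Note: the SLSB holds one state byte per buffer and is shared with the
 * adapter; the helpers below read and modify it directly. In QEBSM mode the
 * buffer states are queried and set with the EQBS/SQBS instructions instead,
 * via qdio_do_eqbs()/qdio_do_sqbs() above and the subchannel token.
 */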
 212/* returns number of examined buffers and their common state in *state */
 213static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 214				 unsigned char *state, unsigned int count,
 215				 int auto_ack)
 216{
 217	unsigned char __state = 0;
 218	int i;
 219
 220	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
 221	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
 222
 223	if (is_qebsm(q))
 224		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 225
 226	for (i = 0; i < count; i++) {
 227		if (!__state)
 228			__state = q->slsb.val[bufnr];
 229		else if (q->slsb.val[bufnr] != __state)
 230			break;
 231		bufnr = next_buf(bufnr);
 232	}
 233	*state = __state;
 234	return i;
 235}
 236
 237static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
 238				unsigned char *state, int auto_ack)
 239{
 240	return get_buf_states(q, bufnr, state, 1, auto_ack);
 241}
 242
 243/* wrap-around safe setting of slsb states, returns number of changed buffers */
 244static inline int set_buf_states(struct qdio_q *q, int bufnr,
 245				 unsigned char state, int count)
 246{
 247	int i;
 248
 249	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
 250	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);
 251
 252	if (is_qebsm(q))
 253		return qdio_do_sqbs(q, state, bufnr, count);
 254
 255	for (i = 0; i < count; i++) {
 256		xchg(&q->slsb.val[bufnr], state);
 257		bufnr = next_buf(bufnr);
 258	}
 259	return count;
 260}
 261
 262static inline int set_buf_state(struct qdio_q *q, int bufnr,
 263				unsigned char state)
 264{
 265	return set_buf_states(q, bufnr, state, 1);
 266}
 267
 268/* set slsb states to initial state */
 269void qdio_init_buf_states(struct qdio_irq *irq_ptr)
 270{
 271	struct qdio_q *q;
 272	int i;
 273
 274	for_each_input_queue(irq_ptr, q, i)
 275		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
 276			       QDIO_MAX_BUFFERS_PER_Q);
 277	for_each_output_queue(irq_ptr, q, i)
 278		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
 279			       QDIO_MAX_BUFFERS_PER_Q);
 280}
 281
 282static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 283			  unsigned int input)
 284{
 285	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 286	unsigned int fc = QDIO_SIGA_SYNC;
 287	int cc;
 288
 289	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
 290	qperf_inc(q, siga_sync);
 291
 292	if (is_qebsm(q)) {
 293		schid = q->irq_ptr->sch_token;
 294		fc |= QDIO_SIGA_QEBSM_FLAG;
 295	}
 296
 297	cc = do_siga_sync(schid, output, input, fc);
 298	if (unlikely(cc))
 299		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
 300	return cc;
 301}
 302
 303static inline int qdio_siga_sync_q(struct qdio_q *q)
 304{
 305	if (q->is_input_q)
 306		return qdio_siga_sync(q, 0, q->mask);
 307	else
 308		return qdio_siga_sync(q, q->mask, 0);
 309}
 310
 311static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
 312{
 313	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 314	unsigned int fc = QDIO_SIGA_WRITE;
 315	u64 start_time = 0;
 316	int retries = 0, cc;
 317
 318	if (is_qebsm(q)) {
 319		schid = q->irq_ptr->sch_token;
 320		fc |= QDIO_SIGA_QEBSM_FLAG;
 321	}
 322again:
 323	cc = do_siga_output(schid, q->mask, busy_bit, fc);
 324
 325	/* hipersocket busy condition */
 326	if (unlikely(*busy_bit)) {
 327		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
 328		retries++;
 329
 330		if (!start_time) {
 331			start_time = get_clock();
 332			goto again;
 333		}
 334		if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
 335			goto again;
 336	}
 337	if (retries) {
 338		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
 339			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
 340		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
 341	}
 342	return cc;
 343}
 344
 345static inline int qdio_siga_input(struct qdio_q *q)
 346{
 347	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 348	unsigned int fc = QDIO_SIGA_READ;
 349	int cc;
 350
 351	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
 352	qperf_inc(q, siga_read);
 353
 354	if (is_qebsm(q)) {
 355		schid = q->irq_ptr->sch_token;
 356		fc |= QDIO_SIGA_QEBSM_FLAG;
 357	}
 358
 359	cc = do_siga_input(schid, q->mask, fc);
 360	if (unlikely(cc))
 361		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
 362	return cc;
 363}
 364
 365#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
 366#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
 367
 368static inline void qdio_sync_queues(struct qdio_q *q)
 369{
 370	/* PCI capable outbound queues will also be scanned so sync them too */
 371	if (pci_out_supported(q))
 372		qdio_siga_sync_all(q);
 373	else
 374		qdio_siga_sync_q(q);
 375}
 376
 377int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 378			unsigned char *state)
 379{
 380	if (need_siga_sync(q))
 381		qdio_siga_sync_q(q);
 382	return get_buf_states(q, bufnr, state, 1, 0);
 383}
 384
 385static inline void qdio_stop_polling(struct qdio_q *q)
 386{
 387	if (!q->u.in.polling)
 388		return;
 389
 390	q->u.in.polling = 0;
 391	qperf_inc(q, stop_polling);
 392
 393	/* show the card that we are not polling anymore */
 394	if (is_qebsm(q)) {
 395		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
 396			       q->u.in.ack_count);
 397		q->u.in.ack_count = 0;
 398	} else
 399		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 400}
 401
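/*
 * Note: account_sbals() feeds the per-queue performance statistics. It keeps
 * a small histogram of how many SBALs were processed per run, using bucket
 * index floor(log2(count)); a run over the whole queue (QDIO_MAX_BUFFERS_MASK
 * buffers) is counted in the last bucket, nr_sbals[7].
 */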
 402static inline void account_sbals(struct qdio_q *q, int count)
 403{
 404	int pos = 0;
 405
 406	q->q_stats.nr_sbal_total += count;
 407	if (count == QDIO_MAX_BUFFERS_MASK) {
 408		q->q_stats.nr_sbals[7]++;
 409		return;
 410	}
 411	while (count >>= 1)
 412		pos++;
 413	q->q_stats.nr_sbals[pos]++;
 414}
 415
 416static void process_buffer_error(struct qdio_q *q, int count)
 417{
 418	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
 419					SLSB_P_OUTPUT_NOT_INIT;
 420
 421	q->qdio_error |= QDIO_ERROR_SLSB_STATE;
 422
 423	/* special handling for no target buffer empty */
 424	if ((!q->is_input_q &&
 425	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
 426		qperf_inc(q, target_full);
 427		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
 428			      q->first_to_check);
 429		return;
 430	}
 431
 432	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
 433	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
 434	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
 435	DBF_ERROR("F14:%2x F15:%2x",
 436		  q->sbal[q->first_to_check]->element[14].sflags,
 437		  q->sbal[q->first_to_check]->element[15].sflags);
 438
 439	/*
 440	 * Interrupts may be avoided as long as the error is present
 441	 * so change the buffer state immediately to avoid starvation.
 442	 */
 443	set_buf_states(q, q->first_to_check, state, count);
 444}
 445
 446static inline void inbound_primed(struct qdio_q *q, int count)
 447{
 448	int new;
 449
 450	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
 451
 452	/* for QEBSM the ACK was already set by EQBS */
 453	if (is_qebsm(q)) {
 454		if (!q->u.in.polling) {
 455			q->u.in.polling = 1;
 456			q->u.in.ack_count = count;
 457			q->u.in.ack_start = q->first_to_check;
 458			return;
 459		}
 460
 461		/* delete the previous ACK's */
 462		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
 463			       q->u.in.ack_count);
 464		q->u.in.ack_count = count;
 465		q->u.in.ack_start = q->first_to_check;
 466		return;
 467	}
 468
 469	/*
 470	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
 471	 * or by the next inbound run.
 472	 */
 473	new = add_buf(q->first_to_check, count - 1);
 474	if (q->u.in.polling) {
 475		/* reset the previous ACK but first set the new one */
 476		set_buf_state(q, new, SLSB_P_INPUT_ACK);
 477		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 478	} else {
 479		q->u.in.polling = 1;
 480		set_buf_state(q, new, SLSB_P_INPUT_ACK);
 481	}
 482
 483	q->u.in.ack_start = new;
 484	count--;
 485	if (!count)
 486		return;
 487	/* need to change ALL buffers to get more interrupts */
 488	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
 489}
 490
 491static int get_inbound_buffer_frontier(struct qdio_q *q)
 492{
 493	int count, stop;
 494	unsigned char state = 0;
 495
 496	/*
 497	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
 498	 * would return 0.
 499	 */
 500	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 501	stop = add_buf(q->first_to_check, count);
 502
 503	if (q->first_to_check == stop)
 504		goto out;
 505
 506	/*
  507	 * No siga sync here: a PCI interrupt or the thin interrupt handler
  508	 * that ran before us has already synced the queues.
 509	 */
 510	count = get_buf_states(q, q->first_to_check, &state, count, 1);
 511	if (!count)
 512		goto out;
 513
 514	switch (state) {
 515	case SLSB_P_INPUT_PRIMED:
 516		inbound_primed(q, count);
 517		q->first_to_check = add_buf(q->first_to_check, count);
 518		if (atomic_sub(count, &q->nr_buf_used) == 0)
 519			qperf_inc(q, inbound_queue_full);
 520		if (q->irq_ptr->perf_stat_enabled)
 521			account_sbals(q, count);
 522		break;
 523	case SLSB_P_INPUT_ERROR:
 524		process_buffer_error(q, count);
 525		q->first_to_check = add_buf(q->first_to_check, count);
 526		atomic_sub(count, &q->nr_buf_used);
 527		if (q->irq_ptr->perf_stat_enabled)
 528			account_sbals_error(q, count);
 529		break;
 530	case SLSB_CU_INPUT_EMPTY:
 531	case SLSB_P_INPUT_NOT_INIT:
 532	case SLSB_P_INPUT_ACK:
 533		if (q->irq_ptr->perf_stat_enabled)
 534			q->q_stats.nr_sbal_nop++;
 535		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
 536		break;
 537	default:
 538		BUG();
 539	}
 540out:
 541	return q->first_to_check;
 542}
 543
 544static int qdio_inbound_q_moved(struct qdio_q *q)
 545{
 546	int bufnr;
 547
 548	bufnr = get_inbound_buffer_frontier(q);
 549
 550	if ((bufnr != q->last_move) || q->qdio_error) {
 551		q->last_move = bufnr;
 552		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
 553			q->u.in.timestamp = get_clock();
 554		return 1;
 555	} else
 556		return 0;
 557}
 558
 559static inline int qdio_inbound_q_done(struct qdio_q *q)
 560{
 561	unsigned char state = 0;
 562
 563	if (!atomic_read(&q->nr_buf_used))
 564		return 1;
 565
 566	if (need_siga_sync(q))
 567		qdio_siga_sync_q(q);
 568	get_buf_state(q, q->first_to_check, &state, 0);
 569
 570	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
 571		/* more work coming */
 572		return 0;
 573
 574	if (is_thinint_irq(q->irq_ptr))
 575		return 1;
 576
 577	/* don't poll under z/VM */
 578	if (MACHINE_IS_VM)
 579		return 1;
 580
 581	/*
  582	 * At this point we know that inbound first_to_check
 583	 * has (probably) not moved (see qdio_inbound_processing).
 584	 */
 585	if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 586		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 587			      q->first_to_check);
 588		return 1;
 589	} else
 590		return 0;
 591}
 592
 593static void qdio_kick_handler(struct qdio_q *q)
 594{
 595	int start = q->first_to_kick;
 596	int end = q->first_to_check;
 597	int count;
 598
 599	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 600		return;
 601
 602	count = sub_buf(end, start);
 603
 604	if (q->is_input_q) {
 605		qperf_inc(q, inbound_handler);
 606		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
 607	} else {
 608		qperf_inc(q, outbound_handler);
 609		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
 610			      start, count);
 611	}
 612
 613	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
 614		   q->irq_ptr->int_parm);
 615
 616	/* for the next time */
 617	q->first_to_kick = end;
 618	q->qdio_error = 0;
 619}
 620
 621static void __qdio_inbound_processing(struct qdio_q *q)
 622{
 623	qperf_inc(q, tasklet_inbound);
 624
 625	if (!qdio_inbound_q_moved(q))
 626		return;
 627
 628	qdio_kick_handler(q);
 629
 630	if (!qdio_inbound_q_done(q)) {
 631		/* means poll time is not yet over */
 632		qperf_inc(q, tasklet_inbound_resched);
 633		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
 634			tasklet_schedule(&q->tasklet);
 635			return;
 636		}
 637	}
 638
 639	qdio_stop_polling(q);
 640	/*
 641	 * We need to check again to not lose initiative after
 642	 * resetting the ACK state.
 643	 */
 644	if (!qdio_inbound_q_done(q)) {
 645		qperf_inc(q, tasklet_inbound_resched2);
 646		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
 647			tasklet_schedule(&q->tasklet);
 648	}
 649}
 650
 651void qdio_inbound_processing(unsigned long data)
 652{
 653	struct qdio_q *q = (struct qdio_q *)data;
 654	__qdio_inbound_processing(q);
 655}
 656
 657static int get_outbound_buffer_frontier(struct qdio_q *q)
 658{
 659	int count, stop;
 660	unsigned char state = 0;
 661
 662	if (need_siga_sync(q))
 663		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
 664		    !pci_out_supported(q)) ||
 665		    (queue_type(q) == QDIO_IQDIO_QFMT &&
 666		    multicast_outbound(q)))
 667			qdio_siga_sync_q(q);
 668
 669	/*
 670	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
 671	 * would return 0.
 672	 */
 673	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 674	stop = add_buf(q->first_to_check, count);
 675
 676	if (q->first_to_check == stop)
 677		return q->first_to_check;
 678
 679	count = get_buf_states(q, q->first_to_check, &state, count, 0);
 680	if (!count)
 681		return q->first_to_check;
 682
 683	switch (state) {
 684	case SLSB_P_OUTPUT_EMPTY:
 685		/* the adapter got it */
 686		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
 687
 688		atomic_sub(count, &q->nr_buf_used);
 689		q->first_to_check = add_buf(q->first_to_check, count);
 690		if (q->irq_ptr->perf_stat_enabled)
 691			account_sbals(q, count);
 692		break;
 693	case SLSB_P_OUTPUT_ERROR:
 694		process_buffer_error(q, count);
 695		q->first_to_check = add_buf(q->first_to_check, count);
 696		atomic_sub(count, &q->nr_buf_used);
 697		if (q->irq_ptr->perf_stat_enabled)
 698			account_sbals_error(q, count);
 699		break;
 700	case SLSB_CU_OUTPUT_PRIMED:
 701		/* the adapter has not fetched the output yet */
 702		if (q->irq_ptr->perf_stat_enabled)
 703			q->q_stats.nr_sbal_nop++;
 704		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
 705		break;
 706	case SLSB_P_OUTPUT_NOT_INIT:
 707	case SLSB_P_OUTPUT_HALTED:
 708		break;
 709	default:
 710		BUG();
 711	}
 712	return q->first_to_check;
 713}
 714
 715/* all buffers processed? */
 716static inline int qdio_outbound_q_done(struct qdio_q *q)
 717{
 718	return atomic_read(&q->nr_buf_used) == 0;
 719}
 720
 721static inline int qdio_outbound_q_moved(struct qdio_q *q)
 722{
 723	int bufnr;
 724
 725	bufnr = get_outbound_buffer_frontier(q);
 726
 727	if ((bufnr != q->last_move) || q->qdio_error) {
 728		q->last_move = bufnr;
 729		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
 730		return 1;
 731	} else
 732		return 0;
 733}
 734
 735static int qdio_kick_outbound_q(struct qdio_q *q)
 736{
 737	int retries = 0, cc;
 738	unsigned int busy_bit;
 739
 740	if (!need_siga_out(q))
 741		return 0;
 742
 743	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
 744retry:
 745	qperf_inc(q, siga_write);
 746
 747	cc = qdio_siga_output(q, &busy_bit);
 748	switch (cc) {
 749	case 0:
 750		break;
 751	case 2:
 752		if (busy_bit) {
 753			while (++retries < QDIO_BUSY_BIT_RETRIES) {
 754				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
 755				goto retry;
 756			}
 757			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
 758			cc |= QDIO_ERROR_SIGA_BUSY;
 759		} else
 760			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
 761		break;
 762	case 1:
 763	case 3:
 764		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
 765		break;
 766	}
 767	if (retries) {
 768		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
 769		DBF_ERROR("count:%u", retries);
 770	}
 771	return cc;
 772}
 773
 774static void __qdio_outbound_processing(struct qdio_q *q)
 775{
 776	qperf_inc(q, tasklet_outbound);
 777	BUG_ON(atomic_read(&q->nr_buf_used) < 0);
 778
 779	if (qdio_outbound_q_moved(q))
 780		qdio_kick_handler(q);
 781
 782	if (queue_type(q) == QDIO_ZFCP_QFMT)
 783		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
 784			goto sched;
 785
 786	/* bail out for HiperSockets unicast queues */
 787	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
 788		return;
 789
 790	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
 791	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
 792		goto sched;
 793
 794	if (q->u.out.pci_out_enabled)
 795		return;
 796
 797	/*
 798	 * Now we know that queue type is either qeth without pci enabled
 799	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
 800	 * EMPTY is noticed and outbound_handler is called after some time.
 801	 */
 802	if (qdio_outbound_q_done(q))
 803		del_timer(&q->u.out.timer);
 804	else
 805		if (!timer_pending(&q->u.out.timer))
 806			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
 807	return;
 808
 809sched:
 810	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 811		return;
 812	tasklet_schedule(&q->tasklet);
 813}
 814
 815/* outbound tasklet */
 816void qdio_outbound_processing(unsigned long data)
 817{
 818	struct qdio_q *q = (struct qdio_q *)data;
 819	__qdio_outbound_processing(q);
 820}
 821
 822void qdio_outbound_timer(unsigned long data)
 823{
 824	struct qdio_q *q = (struct qdio_q *)data;
 825
 826	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 827		return;
 828	tasklet_schedule(&q->tasklet);
 829}
 830
 831static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 832{
 833	struct qdio_q *out;
 834	int i;
 835
 836	if (!pci_out_supported(q))
 837		return;
 838
 839	for_each_output_queue(q->irq_ptr, out, i)
 840		if (!qdio_outbound_q_done(out))
 841			tasklet_schedule(&out->tasklet);
 842}
 843
 844static void __tiqdio_inbound_processing(struct qdio_q *q)
 845{
 846	qperf_inc(q, tasklet_inbound);
 847	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
 848		qdio_sync_queues(q);
 849
 850	/*
 851	 * The interrupt could be caused by a PCI request. Check the
 852	 * PCI capable outbound queues.
 853	 */
 854	qdio_check_outbound_after_thinint(q);
 855
 856	if (!qdio_inbound_q_moved(q))
 857		return;
 858
 859	qdio_kick_handler(q);
 860
 861	if (!qdio_inbound_q_done(q)) {
 862		qperf_inc(q, tasklet_inbound_resched);
 863		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
 864			tasklet_schedule(&q->tasklet);
 865			return;
 866		}
 867	}
 868
 869	qdio_stop_polling(q);
 870	/*
 871	 * We need to check again to not lose initiative after
 872	 * resetting the ACK state.
 873	 */
 874	if (!qdio_inbound_q_done(q)) {
 875		qperf_inc(q, tasklet_inbound_resched2);
 876		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
 877			tasklet_schedule(&q->tasklet);
 878	}
 879}
 880
 881void tiqdio_inbound_processing(unsigned long data)
 882{
 883	struct qdio_q *q = (struct qdio_q *)data;
 884	__tiqdio_inbound_processing(q);
 885}
 886
 887static inline void qdio_set_state(struct qdio_irq *irq_ptr,
 888				  enum qdio_irq_states state)
 889{
 890	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
 891
 892	irq_ptr->state = state;
 893	mb();
 894}
 895
 896static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
 897{
 898	if (irb->esw.esw0.erw.cons) {
 899		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
 900		DBF_ERROR_HEX(irb, 64);
 901		DBF_ERROR_HEX(irb->ecw, 64);
 902	}
 903}
 904
 905/* PCI interrupt handler */
 906static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 907{
 908	int i;
 909	struct qdio_q *q;
 910
 911	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 912		return;
 913
 914	for_each_input_queue(irq_ptr, q, i) {
 915		if (q->u.in.queue_start_poll) {
 916			/* skip if polling is enabled or already in work */
 917			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
 918				     &q->u.in.queue_irq_state)) {
 919				qperf_inc(q, int_discarded);
 920				continue;
 921			}
 922			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
 923						 q->irq_ptr->int_parm);
 924		} else
 925			tasklet_schedule(&q->tasklet);
 926	}
 927
 928	if (!pci_out_supported(q))
 929		return;
 930
 931	for_each_output_queue(irq_ptr, q, i) {
 932		if (qdio_outbound_q_done(q))
 933			continue;
 934		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
 935			qdio_siga_sync_q(q);
 936		tasklet_schedule(&q->tasklet);
 937	}
 938}
 939
 940static void qdio_handle_activate_check(struct ccw_device *cdev,
 941				unsigned long intparm, int cstat, int dstat)
 942{
 943	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 944	struct qdio_q *q;
 945
 946	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
 947	DBF_ERROR("intp :%lx", intparm);
 948	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
 949
 950	if (irq_ptr->nr_input_qs) {
 951		q = irq_ptr->input_qs[0];
 952	} else if (irq_ptr->nr_output_qs) {
 953		q = irq_ptr->output_qs[0];
 954	} else {
 955		dump_stack();
 956		goto no_handler;
 957	}
 958	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
 959		   0, -1, -1, irq_ptr->int_parm);
 960no_handler:
 961	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
 962}
 963
 964static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
 965				      int dstat)
 966{
 967	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 968
 969	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
 970
 971	if (cstat)
 972		goto error;
 973	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
 974		goto error;
 975	if (!(dstat & DEV_STAT_DEV_END))
 976		goto error;
 977	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
 978	return;
 979
 980error:
 981	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
 982	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
 983	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
 984}
 985
 986/* qdio interrupt handler */
 987void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 988		      struct irb *irb)
 989{
 990	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 991	int cstat, dstat;
 992
 993	if (!intparm || !irq_ptr) {
 994		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
 995		return;
 996	}
 997
 998	kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
 999	if (irq_ptr->perf_stat_enabled)
1000		irq_ptr->perf_stat.qdio_int++;
1001
1002	if (IS_ERR(irb)) {
1003		switch (PTR_ERR(irb)) {
1004		case -EIO:
1005			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
1006			qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1007			wake_up(&cdev->private->wait_q);
1008			return;
1009		default:
1010			WARN_ON(1);
1011			return;
1012		}
1013	}
1014	qdio_irq_check_sense(irq_ptr, irb);
1015	cstat = irb->scsw.cmd.cstat;
1016	dstat = irb->scsw.cmd.dstat;
1017
1018	switch (irq_ptr->state) {
1019	case QDIO_IRQ_STATE_INACTIVE:
1020		qdio_establish_handle_irq(cdev, cstat, dstat);
1021		break;
1022	case QDIO_IRQ_STATE_CLEANUP:
1023		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1024		break;
1025	case QDIO_IRQ_STATE_ESTABLISHED:
1026	case QDIO_IRQ_STATE_ACTIVE:
1027		if (cstat & SCHN_STAT_PCI) {
1028			qdio_int_handler_pci(irq_ptr);
1029			return;
1030		}
1031		if (cstat || dstat)
1032			qdio_handle_activate_check(cdev, intparm, cstat,
1033						   dstat);
1034		break;
1035	case QDIO_IRQ_STATE_STOPPED:
1036		break;
1037	default:
1038		WARN_ON(1);
1039	}
1040	wake_up(&cdev->private->wait_q);
1041}
1042
1043/**
1044 * qdio_get_ssqd_desc - get qdio subchannel description
1045 * @cdev: ccw device to get description for
1046 * @data: where to store the ssqd
1047 *
1048 * Returns 0 or an error code. The results of the chsc are stored in the
1049 * specified structure.
1050 */
1051int qdio_get_ssqd_desc(struct ccw_device *cdev,
1052		       struct qdio_ssqd_desc *data)
1053{
1054
1055	if (!cdev || !cdev->private)
1056		return -EINVAL;
1057
1058	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
1059	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
1060}
1061EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1062
1063static void qdio_shutdown_queues(struct ccw_device *cdev)
1064{
1065	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1066	struct qdio_q *q;
1067	int i;
1068
1069	for_each_input_queue(irq_ptr, q, i)
1070		tasklet_kill(&q->tasklet);
1071
1072	for_each_output_queue(irq_ptr, q, i) {
1073		del_timer(&q->u.out.timer);
1074		tasklet_kill(&q->tasklet);
1075	}
1076}
1077
1078/**
1079 * qdio_shutdown - shut down a qdio subchannel
1080 * @cdev: associated ccw device
1081 * @how: use halt or clear to shutdown
1082 */
1083int qdio_shutdown(struct ccw_device *cdev, int how)
1084{
1085	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1086	int rc;
1087	unsigned long flags;
1088
1089	if (!irq_ptr)
1090		return -ENODEV;
1091
1092	BUG_ON(irqs_disabled());
1093	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1094
1095	mutex_lock(&irq_ptr->setup_mutex);
1096	/*
1097	 * Subchannel was already shot down. We cannot prevent being called
1098	 * twice since cio may trigger a shutdown asynchronously.
1099	 */
1100	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1101		mutex_unlock(&irq_ptr->setup_mutex);
1102		return 0;
1103	}
1104
1105	/*
1106	 * Indicate that the device is going down. Scheduling the queue
1107	 * tasklets is forbidden from here on.
1108	 */
1109	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1110
1111	tiqdio_remove_input_queues(irq_ptr);
1112	qdio_shutdown_queues(cdev);
1113	qdio_shutdown_debug_entries(irq_ptr, cdev);
1114
1115	/* cleanup subchannel */
1116	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1117
1118	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1119		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1120	else
1121		/* default behaviour is halt */
1122		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1123	if (rc) {
1124		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
1125		DBF_ERROR("rc:%4d", rc);
1126		goto no_cleanup;
1127	}
1128
1129	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1130	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1131	wait_event_interruptible_timeout(cdev->private->wait_q,
1132		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1133		irq_ptr->state == QDIO_IRQ_STATE_ERR,
1134		10 * HZ);
1135	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1136
1137no_cleanup:
1138	qdio_shutdown_thinint(irq_ptr);
1139
1140	/* restore interrupt handler */
1141	if ((void *)cdev->handler == (void *)qdio_int_handler)
1142		cdev->handler = irq_ptr->orig_handler;
1143	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1144
1145	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1146	mutex_unlock(&irq_ptr->setup_mutex);
1147	if (rc)
1148		return rc;
1149	return 0;
1150}
1151EXPORT_SYMBOL_GPL(qdio_shutdown);
1152
1153/**
1154 * qdio_free - free data structures for a qdio subchannel
1155 * @cdev: associated ccw device
1156 */
1157int qdio_free(struct ccw_device *cdev)
1158{
1159	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1160
1161	if (!irq_ptr)
1162		return -ENODEV;
1163
1164	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
1165	mutex_lock(&irq_ptr->setup_mutex);
1166
1167	if (irq_ptr->debug_area != NULL) {
1168		debug_unregister(irq_ptr->debug_area);
1169		irq_ptr->debug_area = NULL;
1170	}
1171	cdev->private->qdio_data = NULL;
1172	mutex_unlock(&irq_ptr->setup_mutex);
1173
1174	qdio_release_memory(irq_ptr);
1175	return 0;
1176}
1177EXPORT_SYMBOL_GPL(qdio_free);
1178
1179/**
1180 * qdio_allocate - allocate qdio queues and associated data
1181 * @init_data: initialization data
1182 */
1183int qdio_allocate(struct qdio_initialize *init_data)
1184{
1185	struct qdio_irq *irq_ptr;
1186
1187	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
1188
1189	if ((init_data->no_input_qs && !init_data->input_handler) ||
1190	    (init_data->no_output_qs && !init_data->output_handler))
1191		return -EINVAL;
1192
1193	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1194	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1195		return -EINVAL;
1196
1197	if ((!init_data->input_sbal_addr_array) ||
1198	    (!init_data->output_sbal_addr_array))
1199		return -EINVAL;
1200
1201	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1202	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1203	if (!irq_ptr)
1204		goto out_err;
1205
1206	mutex_init(&irq_ptr->setup_mutex);
1207	qdio_allocate_dbf(init_data, irq_ptr);
1208
1209	/*
1210	 * Allocate a page for the chsc calls in qdio_establish.
1211	 * Must be pre-allocated since a zfcp recovery will call
1212	 * qdio_establish. In case of low memory and swap on a zfcp disk
1213	 * we may not be able to allocate memory otherwise.
1214	 */
1215	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1216	if (!irq_ptr->chsc_page)
1217		goto out_rel;
1218
1219	/* qdr is used in ccw1.cda which is u32 */
1220	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1221	if (!irq_ptr->qdr)
1222		goto out_rel;
1223	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);
1224
1225	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1226			     init_data->no_output_qs))
1227		goto out_rel;
1228
1229	init_data->cdev->private->qdio_data = irq_ptr;
1230	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1231	return 0;
1232out_rel:
1233	qdio_release_memory(irq_ptr);
1234out_err:
1235	return -ENOMEM;
1236}
1237EXPORT_SYMBOL_GPL(qdio_allocate);
1238
1239/**
1240 * qdio_establish - establish queues on a qdio subchannel
1241 * @init_data: initialization data
1242 */
1243int qdio_establish(struct qdio_initialize *init_data)
1244{
1245	struct qdio_irq *irq_ptr;
1246	struct ccw_device *cdev = init_data->cdev;
1247	unsigned long saveflags;
1248	int rc;
1249
1250	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
1251
1252	irq_ptr = cdev->private->qdio_data;
1253	if (!irq_ptr)
1254		return -ENODEV;
1255
1256	if (cdev->private->state != DEV_STATE_ONLINE)
1257		return -EINVAL;
1258
1259	mutex_lock(&irq_ptr->setup_mutex);
1260	qdio_setup_irq(init_data);
1261
1262	rc = qdio_establish_thinint(irq_ptr);
1263	if (rc) {
1264		mutex_unlock(&irq_ptr->setup_mutex);
1265		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1266		return rc;
1267	}
1268
1269	/* establish q */
1270	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
1271	irq_ptr->ccw.flags = CCW_FLAG_SLI;
1272	irq_ptr->ccw.count = irq_ptr->equeue.count;
1273	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1274
1275	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1276	ccw_device_set_options_mask(cdev, 0);
1277
1278	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1279	if (rc) {
1280		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
1281		DBF_ERROR("rc:%4x", rc);
1282	}
1283	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1284
1285	if (rc) {
1286		mutex_unlock(&irq_ptr->setup_mutex);
1287		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1288		return rc;
1289	}
1290
1291	wait_event_interruptible_timeout(cdev->private->wait_q,
1292		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1293		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1294
1295	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1296		mutex_unlock(&irq_ptr->setup_mutex);
1297		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1298		return -EIO;
1299	}
1300
1301	qdio_setup_ssqd_info(irq_ptr);
1302	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
1303
1304	/* qebsm is now setup if available, initialize buffer states */
1305	qdio_init_buf_states(irq_ptr);
1306
1307	mutex_unlock(&irq_ptr->setup_mutex);
1308	qdio_print_subchannel_info(irq_ptr, cdev);
1309	qdio_setup_debug_entries(irq_ptr, cdev);
1310	return 0;
1311}
1312EXPORT_SYMBOL_GPL(qdio_establish);
1313
1314/**
1315 * qdio_activate - activate queues on a qdio subchannel
1316 * @cdev: associated cdev
1317 */
1318int qdio_activate(struct ccw_device *cdev)
1319{
1320	struct qdio_irq *irq_ptr;
1321	int rc;
1322	unsigned long saveflags;
1323
1324	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
1325
1326	irq_ptr = cdev->private->qdio_data;
1327	if (!irq_ptr)
1328		return -ENODEV;
1329
1330	if (cdev->private->state != DEV_STATE_ONLINE)
1331		return -EINVAL;
1332
1333	mutex_lock(&irq_ptr->setup_mutex);
1334	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1335		rc = -EBUSY;
1336		goto out;
1337	}
1338
1339	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
1340	irq_ptr->ccw.flags = CCW_FLAG_SLI;
1341	irq_ptr->ccw.count = irq_ptr->aqueue.count;
1342	irq_ptr->ccw.cda = 0;
1343
1344	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1345	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1346
1347	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1348			      0, DOIO_DENY_PREFETCH);
1349	if (rc) {
1350		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
1351		DBF_ERROR("rc:%4x", rc);
1352	}
1353	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1354
1355	if (rc)
1356		goto out;
1357
1358	if (is_thinint_irq(irq_ptr))
1359		tiqdio_add_input_queues(irq_ptr);
1360
1361	/* wait for subchannel to become active */
1362	msleep(5);
1363
1364	switch (irq_ptr->state) {
1365	case QDIO_IRQ_STATE_STOPPED:
1366	case QDIO_IRQ_STATE_ERR:
1367		rc = -EIO;
1368		break;
1369	default:
1370		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1371		rc = 0;
1372	}
1373out:
1374	mutex_unlock(&irq_ptr->setup_mutex);
1375	return rc;
1376}
1377EXPORT_SYMBOL_GPL(qdio_activate);
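/*
 * Usage sketch (illustrative only, not taken from a real driver): a qdio
 * user such as qeth or zfcp typically brings a subchannel up with the three
 * setup calls exported above and tears it down again with qdio_shutdown()
 * and qdio_free(). init_data is a struct qdio_initialize the caller fills
 * in beforehand (handlers, queue counts, SBAL address arrays):
 *
 *	rc = qdio_allocate(&init_data);
 *	if (!rc)
 *		rc = qdio_establish(&init_data);
 *	if (!rc)
 *		rc = qdio_activate(init_data.cdev);
 */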
1378
1379static inline int buf_in_between(int bufnr, int start, int count)
1380{
1381	int end = add_buf(start, count);
1382
1383	if (end > start) {
1384		if (bufnr >= start && bufnr < end)
1385			return 1;
1386		else
1387			return 0;
1388	}
1389
1390	/* wrap-around case */
1391	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1392	    (bufnr < end))
1393		return 1;
1394	else
1395		return 0;
1396}
1397
1398/**
1399 * handle_inbound - reset processed input buffers
1400 * @q: queue containing the buffers
1401 * @callflags: flags
1402 * @bufnr: first buffer to process
1403 * @count: how many buffers are emptied
1404 */
1405static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1406			  int bufnr, int count)
1407{
1408	int used, diff;
1409
1410	qperf_inc(q, inbound_call);
1411
1412	if (!q->u.in.polling)
1413		goto set;
1414
1415	/* protect against stop polling setting an ACK for an emptied slsb */
1416	if (count == QDIO_MAX_BUFFERS_PER_Q) {
1417		/* overwriting everything, just delete polling status */
1418		q->u.in.polling = 0;
1419		q->u.in.ack_count = 0;
1420		goto set;
1421	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1422		if (is_qebsm(q)) {
1423			/* partial overwrite, just update ack_start */
1424			diff = add_buf(bufnr, count);
1425			diff = sub_buf(diff, q->u.in.ack_start);
1426			q->u.in.ack_count -= diff;
1427			if (q->u.in.ack_count <= 0) {
1428				q->u.in.polling = 0;
1429				q->u.in.ack_count = 0;
1430				goto set;
1431			}
1432			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1433		}
1434		else
1435			/* the only ACK will be deleted, so stop polling */
1436			q->u.in.polling = 0;
1437	}
1438
1439set:
1440	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1441
1442	used = atomic_add_return(count, &q->nr_buf_used) - count;
1443	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);
1444
1445	/* no need to signal as long as the adapter had free buffers */
1446	if (used)
1447		return 0;
1448
1449	if (need_siga_in(q))
1450		return qdio_siga_input(q);
1451	return 0;
1452}
1453
1454/**
1455 * handle_outbound - process filled outbound buffers
1456 * @q: queue containing the buffers
1457 * @callflags: flags
1458 * @bufnr: first buffer to process
1459 * @count: how many buffers are filled
1460 */
1461static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1462			   int bufnr, int count)
1463{
1464	unsigned char state = 0;
1465	int used, rc = 0;
1466
1467	qperf_inc(q, outbound_call);
1468
1469	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1470	used = atomic_add_return(count, &q->nr_buf_used);
1471	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1472
1473	if (used == QDIO_MAX_BUFFERS_PER_Q)
1474		qperf_inc(q, outbound_queue_full);
1475
1476	if (callflags & QDIO_FLAG_PCI_OUT) {
1477		q->u.out.pci_out_enabled = 1;
1478		qperf_inc(q, pci_request_int);
1479	} else
1480		q->u.out.pci_out_enabled = 0;
1481
1482	if (queue_type(q) == QDIO_IQDIO_QFMT) {
1483		/* One SIGA-W per buffer required for unicast HiperSockets. */
1484		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1485
1486		rc = qdio_kick_outbound_q(q);
1487	} else if (need_siga_sync(q)) {
1488		rc = qdio_siga_sync_q(q);
1489	} else {
1490		/* try to fast requeue buffers */
1491		get_buf_state(q, prev_buf(bufnr), &state, 0);
1492		if (state != SLSB_CU_OUTPUT_PRIMED)
1493			rc = qdio_kick_outbound_q(q);
1494		else
1495			qperf_inc(q, fast_requeue);
1496	}
1497
1498	/* in case of SIGA errors we must process the error immediately */
1499	if (used >= q->u.out.scan_threshold || rc)
1500		tasklet_schedule(&q->tasklet);
1501	else
1502		/* free the SBALs in case of no further traffic */
1503		if (!timer_pending(&q->u.out.timer))
1504			mod_timer(&q->u.out.timer, jiffies + HZ);
1505	return rc;
1506}
1507
1508/**
1509 * do_QDIO - process input or output buffers
1510 * @cdev: associated ccw_device for the qdio subchannel
1511 * @callflags: input or output and special flags from the program
1512 * @q_nr: queue number
1513 * @bufnr: buffer number
1514 * @count: how many buffers to process
1515 */
1516int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1517	    int q_nr, unsigned int bufnr, unsigned int count)
1518{
1519	struct qdio_irq *irq_ptr;
1520
1521	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1522		return -EINVAL;
1523
1524	irq_ptr = cdev->private->qdio_data;
1525	if (!irq_ptr)
1526		return -ENODEV;
1527
1528	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1529		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
1530
1531	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1532		return -EBUSY;
1533	if (!count)
1534		return 0;
1535	if (callflags & QDIO_FLAG_SYNC_INPUT)
1536		return handle_inbound(irq_ptr->input_qs[q_nr],
1537				      callflags, bufnr, count);
1538	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1539		return handle_outbound(irq_ptr->output_qs[q_nr],
1540				       callflags, bufnr, count);
1541	return -EINVAL;
1542}
1543EXPORT_SYMBOL_GPL(do_QDIO);
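/*
 * Usage sketch for do_QDIO() (illustrative; bufnr and queue_nr are caller
 * variables, only the function and the flags belong to this API):
 *
 *	// hand a processed input buffer back to the adapter
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 *
 *	// submit one filled output buffer and request a PCI interrupt
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     queue_nr, bufnr, 1);
 */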
1544
1545/**
1546 * qdio_start_irq - process input buffers
1547 * @cdev: associated ccw_device for the qdio subchannel
1548 * @nr: input queue number
1549 *
1550 * Return codes
1551 *   0 - success
1552 *   1 - irqs not started since new data is available
1553 */
1554int qdio_start_irq(struct ccw_device *cdev, int nr)
1555{
1556	struct qdio_q *q;
1557	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1558
1559	if (!irq_ptr)
1560		return -ENODEV;
1561	q = irq_ptr->input_qs[nr];
1562
1563	WARN_ON(queue_irqs_enabled(q));
1564
1565	if (!shared_ind(q->irq_ptr->dsci))
1566		xchg(q->irq_ptr->dsci, 0);
1567
1568	qdio_stop_polling(q);
1569	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1570
1571	/*
1572	 * We need to check again to not lose initiative after
1573	 * resetting the ACK state.
1574	 */
1575	if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
1576		goto rescan;
1577	if (!qdio_inbound_q_done(q))
1578		goto rescan;
1579	return 0;
1580
1581rescan:
1582	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1583			     &q->u.in.queue_irq_state))
1584		return 0;
1585	else
1586		return 1;
1587
1588}
1589EXPORT_SYMBOL(qdio_start_irq);
1590
1591/**
1592 * qdio_get_next_buffers - process input buffers
1593 * @cdev: associated ccw_device for the qdio subchannel
1594 * @nr: input queue number
1595 * @bufnr: first filled buffer number
1596 * @error: buffers are in error state
1597 *
1598 * Return codes
1599 *   < 0 - error
1600 *   = 0 - no new buffers found
1601 *   > 0 - number of processed buffers
1602 */
1603int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1604			  int *error)
1605{
1606	struct qdio_q *q;
1607	int start, end;
1608	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1609
1610	if (!irq_ptr)
1611		return -ENODEV;
1612	q = irq_ptr->input_qs[nr];
1613	WARN_ON(queue_irqs_enabled(q));
1614
1615	/*
1616	 * Cannot rely on automatic sync after interrupt since queues may
1617	 * also be examined without interrupt.
1618	 */
1619	if (need_siga_sync(q))
1620		qdio_sync_queues(q);
1621
1622	/* check the PCI capable outbound queues. */
1623	qdio_check_outbound_after_thinint(q);
1624
1625	if (!qdio_inbound_q_moved(q))
1626		return 0;
1627
1628	/* Note: upper-layer MUST stop processing immediately here ... */
1629	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1630		return -EIO;
1631
1632	start = q->first_to_kick;
1633	end = q->first_to_check;
1634	*bufnr = start;
1635	*error = q->qdio_error;
1636
1637	/* for the next time */
1638	q->first_to_kick = end;
1639	q->qdio_error = 0;
1640	return sub_buf(end, start);
1641}
1642EXPORT_SYMBOL(qdio_get_next_buffers);
1643
1644/**
1645 * qdio_stop_irq - disable interrupt processing for the device
1646 * @cdev: associated ccw_device for the qdio subchannel
1647 * @nr: input queue number
1648 *
1649 * Return codes
1650 *   0 - interrupts were already disabled
1651 *   1 - interrupts successfully disabled
1652 */
1653int qdio_stop_irq(struct ccw_device *cdev, int nr)
1654{
1655	struct qdio_q *q;
1656	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1657
1658	if (!irq_ptr)
1659		return -ENODEV;
1660	q = irq_ptr->input_qs[nr];
1661
1662	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1663			     &q->u.in.queue_irq_state))
1664		return 0;
1665	else
1666		return 1;
1667}
1668EXPORT_SYMBOL(qdio_stop_irq);
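/*
 * Usage sketch for the interrupt avoidance API above (illustrative; the
 * consume_buffers() helper is hypothetical). A driver that registered a
 * queue_start_poll callback (see qdio_int_handler_pci()) can process input
 * buffers NAPI-style:
 *
 *	do {
 *		n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *		if (n > 0)
 *			consume_buffers(bufnr, n, error);
 *	} while (n > 0);
 *
 *	// re-enable interrupts; rc == 1 means new data arrived, keep polling
 *	rc = qdio_start_irq(cdev, 0);
 */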
1669
1670static int __init init_QDIO(void)
1671{
1672	int rc;
1673
1674	rc = qdio_debug_init();
1675	if (rc)
1676		return rc;
1677	rc = qdio_setup_init();
1678	if (rc)
1679		goto out_debug;
1680	rc = tiqdio_allocate_memory();
1681	if (rc)
1682		goto out_cache;
1683	rc = tiqdio_register_thinints();
1684	if (rc)
1685		goto out_ti;
1686	return 0;
1687
1688out_ti:
1689	tiqdio_free_memory();
1690out_cache:
1691	qdio_setup_exit();
1692out_debug:
1693	qdio_debug_exit();
1694	return rc;
1695}
1696
1697static void __exit exit_QDIO(void)
1698{
1699	tiqdio_unregister_thinints();
1700	tiqdio_free_memory();
1701	qdio_setup_exit();
1702	qdio_debug_exit();
1703}
1704
1705module_init(init_QDIO);
1706module_exit(exit_QDIO);
v3.15
   1/*
   2 * Linux for s390 qdio support, buffer handling, qdio API and module support.
   3 *
   4 * Copyright IBM Corp. 2000, 2008
   5 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
   6 *	      Jan Glauber <jang@linux.vnet.ibm.com>
   7 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
   8 */
   9#include <linux/module.h>
  10#include <linux/init.h>
  11#include <linux/kernel.h>
  12#include <linux/timer.h>
  13#include <linux/delay.h>
  14#include <linux/gfp.h>
  15#include <linux/io.h>
  16#include <linux/atomic.h>
  17#include <asm/debug.h>
  18#include <asm/qdio.h>
  19#include <asm/ipl.h>
  20
  21#include "cio.h"
  22#include "css.h"
  23#include "device.h"
  24#include "qdio.h"
  25#include "qdio_debug.h"
  26
  27MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
  28	"Jan Glauber <jang@linux.vnet.ibm.com>");
  29MODULE_DESCRIPTION("QDIO base support");
  30MODULE_LICENSE("GPL");
  31
  32static inline int do_siga_sync(unsigned long schid,
  33			       unsigned int out_mask, unsigned int in_mask,
  34			       unsigned int fc)
  35{
  36	register unsigned long __fc asm ("0") = fc;
  37	register unsigned long __schid asm ("1") = schid;
  38	register unsigned long out asm ("2") = out_mask;
  39	register unsigned long in asm ("3") = in_mask;
  40	int cc;
  41
  42	asm volatile(
  43		"	siga	0\n"
  44		"	ipm	%0\n"
  45		"	srl	%0,28\n"
  46		: "=d" (cc)
  47		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
  48	return cc;
  49}
  50
  51static inline int do_siga_input(unsigned long schid, unsigned int mask,
  52				unsigned int fc)
  53{
  54	register unsigned long __fc asm ("0") = fc;
  55	register unsigned long __schid asm ("1") = schid;
  56	register unsigned long __mask asm ("2") = mask;
  57	int cc;
  58
  59	asm volatile(
  60		"	siga	0\n"
  61		"	ipm	%0\n"
  62		"	srl	%0,28\n"
  63		: "=d" (cc)
  64		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc");
  65	return cc;
  66}
  67
  68/**
  69 * do_siga_output - perform SIGA-w/wt function
  70 * @schid: subchannel id or in case of QEBSM the subchannel token
  71 * @mask: which output queues to process
  72 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
  73 * @fc: function code to perform
  74 *
  75 * Returns condition code.
  76 * Note: For IQDC unicast queues only the highest priority queue is processed.
  77 */
  78static inline int do_siga_output(unsigned long schid, unsigned long mask,
  79				 unsigned int *bb, unsigned int fc,
  80				 unsigned long aob)
  81{
  82	register unsigned long __fc asm("0") = fc;
  83	register unsigned long __schid asm("1") = schid;
  84	register unsigned long __mask asm("2") = mask;
  85	register unsigned long __aob asm("3") = aob;
  86	int cc;
  87
  88	asm volatile(
  89		"	siga	0\n"
  90		"	ipm	%0\n"
  91		"	srl	%0,28\n"
  92		: "=d" (cc), "+d" (__fc), "+d" (__aob)
  93		: "d" (__schid), "d" (__mask)
  94		: "cc");
  95	*bb = __fc >> 31;
  96	return cc;
  97}
  98
  99static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
 100{
 101	/* all done or next buffer state different */
 102	if (ccq == 0 || ccq == 32)
 103		return 0;
 104	/* no buffer processed */
 105	if (ccq == 97)
 106		return 1;
 107	/* not all buffers processed */
 108	if (ccq == 96)
 109		return 2;
 110	/* notify devices immediately */
 111	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
 112	return -EIO;
 113}
 114
 115/**
 116 * qdio_do_eqbs - extract buffer states for QEBSM
 117 * @q: queue to manipulate
 118 * @state: state of the extracted buffers
 119 * @start: buffer number to start at
 120 * @count: count of buffers to examine
 121 * @auto_ack: automatically acknowledge buffers
 122 *
 123 * Returns the number of successfully extracted equal buffer states.
  124 * Stops processing if a state is different from the last buffer's state.
 125 */
 126static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 127			int start, int count, int auto_ack)
 128{
 129	int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
 130	unsigned int ccq = 0;
 131
 132	qperf_inc(q, eqbs);
 133
 134	if (!q->is_input_q)
 135		nr += q->irq_ptr->nr_input_qs;
 136again:
 137	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
 138		      auto_ack);
 139	rc = qdio_check_ccq(q, ccq);
 140	if (!rc)
 141		return count - tmp_count;
 142
 143	if (rc == 1) {
 144		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
 145		goto again;
 146	}
 147
 148	if (rc == 2) {
 149		qperf_inc(q, eqbs_partial);
 150		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
 151			tmp_count);
 152		/*
 153		 * Retry once, if that fails bail out and process the
 154		 * extracted buffers before trying again.
 155		 */
 156		if (!retried++)
 157			goto again;
 158		else
 159			return count - tmp_count;
 160	}
 161
 162	DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
 163	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 164	q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE,
 165		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
 166	return 0;
 167}
 168
 169/**
 170 * qdio_do_sqbs - set buffer states for QEBSM
 171 * @q: queue to manipulate
 172 * @state: new state of the buffers
 173 * @start: first buffer number to change
 174 * @count: how many buffers to change
 175 *
 176 * Returns the number of successfully changed buffers.
  177 * Retries until the specified count of buffer states is set or an
 178 * error occurs.
 179 */
 180static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
 181			int count)
 182{
 183	unsigned int ccq = 0;
 184	int tmp_count = count, tmp_start = start;
 185	int nr = q->nr;
 186	int rc;
 187
 188	if (!count)
 189		return 0;
 190	qperf_inc(q, sqbs);
 191
 192	if (!q->is_input_q)
 193		nr += q->irq_ptr->nr_input_qs;
 194again:
 195	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
 196	rc = qdio_check_ccq(q, ccq);
 197	if (!rc) {
 198		WARN_ON_ONCE(tmp_count);
 199		return count - tmp_count;
 200	}
 201
 202	if (rc == 1 || rc == 2) {
 203		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
 204		qperf_inc(q, sqbs_partial);
 205		goto again;
 206	}
 207
 208	DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
 209	DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
 210	q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE,
 211		   q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
 212	return 0;
 213}
 214
 215/* returns number of examined buffers and their common state in *state */
 216static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
 217				 unsigned char *state, unsigned int count,
 218				 int auto_ack, int merge_pending)
 219{
 220	unsigned char __state = 0;
 221	int i;
 222
 223	if (is_qebsm(q))
 224		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
 225
 226	for (i = 0; i < count; i++) {
 227		if (!__state) {
 228			__state = q->slsb.val[bufnr];
 229			if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
 230				__state = SLSB_P_OUTPUT_EMPTY;
 231		} else if (merge_pending) {
 232			if ((q->slsb.val[bufnr] & __state) != __state)
 233				break;
 234		} else if (q->slsb.val[bufnr] != __state)
 235			break;
 236		bufnr = next_buf(bufnr);
 237	}
 238	*state = __state;
 239	return i;
 240}
 241
 242static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
 243				unsigned char *state, int auto_ack)
 244{
 245	return get_buf_states(q, bufnr, state, 1, auto_ack, 0);
 246}
 247
 248/* wrap-around safe setting of slsb states, returns number of changed buffers */
 249static inline int set_buf_states(struct qdio_q *q, int bufnr,
 250				 unsigned char state, int count)
 251{
 252	int i;
 253
 254	if (is_qebsm(q))
 255		return qdio_do_sqbs(q, state, bufnr, count);
 256
 257	for (i = 0; i < count; i++) {
 258		xchg(&q->slsb.val[bufnr], state);
 259		bufnr = next_buf(bufnr);
 260	}
 261	return count;
 262}
 263
 264static inline int set_buf_state(struct qdio_q *q, int bufnr,
 265				unsigned char state)
 266{
 267	return set_buf_states(q, bufnr, state, 1);
 268}
 269
 270/* set slsb states to initial state */
 271static void qdio_init_buf_states(struct qdio_irq *irq_ptr)
 272{
 273	struct qdio_q *q;
 274	int i;
 275
 276	for_each_input_queue(irq_ptr, q, i)
 277		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
 278			       QDIO_MAX_BUFFERS_PER_Q);
 279	for_each_output_queue(irq_ptr, q, i)
 280		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
 281			       QDIO_MAX_BUFFERS_PER_Q);
 282}
 283
 284static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 285			  unsigned int input)
 286{
 287	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 288	unsigned int fc = QDIO_SIGA_SYNC;
 289	int cc;
 290
 291	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
 292	qperf_inc(q, siga_sync);
 293
 294	if (is_qebsm(q)) {
 295		schid = q->irq_ptr->sch_token;
 296		fc |= QDIO_SIGA_QEBSM_FLAG;
 297	}
 298
 299	cc = do_siga_sync(schid, output, input, fc);
 300	if (unlikely(cc))
 301		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
 302	return (cc) ? -EIO : 0;
 303}
 304
 305static inline int qdio_siga_sync_q(struct qdio_q *q)
 306{
 307	if (q->is_input_q)
 308		return qdio_siga_sync(q, 0, q->mask);
 309	else
 310		return qdio_siga_sync(q, q->mask, 0);
 311}
 312
 313static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
 314	unsigned long aob)
 315{
 316	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 317	unsigned int fc = QDIO_SIGA_WRITE;
 318	u64 start_time = 0;
 319	int retries = 0, cc;
 320	unsigned long laob = 0;
 321
 322	if (q->u.out.use_cq && aob != 0) {
 323		fc = QDIO_SIGA_WRITEQ;
 324		laob = aob;
 325	}
 326
 327	if (is_qebsm(q)) {
 328		schid = q->irq_ptr->sch_token;
 329		fc |= QDIO_SIGA_QEBSM_FLAG;
 330	}
 331again:
 332	WARN_ON_ONCE((aob && queue_type(q) != QDIO_IQDIO_QFMT) ||
 333		(aob && fc != QDIO_SIGA_WRITEQ));
 334	cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
 335
 336	/* hipersocket busy condition */
 337	if (unlikely(*busy_bit)) {
 338		retries++;
 339
 340		if (!start_time) {
 341			start_time = get_tod_clock_fast();
 342			goto again;
 343		}
 344		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
 345			goto again;
 346	}
 347	if (retries) {
 348		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
 349			      "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
 350		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
 351	}
 352	return cc;
 353}
 354
 355static inline int qdio_siga_input(struct qdio_q *q)
 356{
 357	unsigned long schid = *((u32 *) &q->irq_ptr->schid);
 358	unsigned int fc = QDIO_SIGA_READ;
 359	int cc;
 360
 361	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
 362	qperf_inc(q, siga_read);
 363
 364	if (is_qebsm(q)) {
 365		schid = q->irq_ptr->sch_token;
 366		fc |= QDIO_SIGA_QEBSM_FLAG;
 367	}
 368
 369	cc = do_siga_input(schid, q->mask, fc);
 370	if (unlikely(cc))
 371		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
 372	return (cc) ? -EIO : 0;
 373}
 374
 375#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
 376#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
 377
 378static inline void qdio_sync_queues(struct qdio_q *q)
 379{
 380	/* PCI capable outbound queues will also be scanned so sync them too */
 381	if (pci_out_supported(q))
 382		qdio_siga_sync_all(q);
 383	else
 384		qdio_siga_sync_q(q);
 385}
 386
 387int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
 388			unsigned char *state)
 389{
 390	if (need_siga_sync(q))
 391		qdio_siga_sync_q(q);
 392	return get_buf_states(q, bufnr, state, 1, 0, 0);
 393}
 394
 395static inline void qdio_stop_polling(struct qdio_q *q)
 396{
 397	if (!q->u.in.polling)
 398		return;
 399
 400	q->u.in.polling = 0;
 401	qperf_inc(q, stop_polling);
 402
 403	/* show the card that we are not polling anymore */
 404	if (is_qebsm(q)) {
 405		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
 406			       q->u.in.ack_count);
 407		q->u.in.ack_count = 0;
 408	} else
 409		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 410}
 411
 412static inline void account_sbals(struct qdio_q *q, int count)
 413{
 414	int pos = 0;
 415
 416	q->q_stats.nr_sbal_total += count;
 417	if (count == QDIO_MAX_BUFFERS_MASK) {
 418		q->q_stats.nr_sbals[7]++;
 419		return;
 420	}
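	/* log2 bucket: e.g. a count of 10 buffers is accounted in nr_sbals[3] */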
 421	while (count >>= 1)
 422		pos++;
 423	q->q_stats.nr_sbals[pos]++;
 424}
 425
 426static void process_buffer_error(struct qdio_q *q, int count)
 427{
 428	unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
 429					SLSB_P_OUTPUT_NOT_INIT;
 430
 431	q->qdio_error = QDIO_ERROR_SLSB_STATE;
 432
  433	/* special handling for the no-target-buffer-empty condition */
 434	if ((!q->is_input_q &&
 435	    (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
 436		qperf_inc(q, target_full);
 437		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
 438			      q->first_to_check);
 439		goto set;
 440	}
 441
 442	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
 443	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
 444	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
 445	DBF_ERROR("F14:%2x F15:%2x",
 446		  q->sbal[q->first_to_check]->element[14].sflags,
 447		  q->sbal[q->first_to_check]->element[15].sflags);
 448
 449set:
 450	/*
 451	 * Interrupts may be avoided as long as the error is present
 452	 * so change the buffer state immediately to avoid starvation.
 453	 */
 454	set_buf_states(q, q->first_to_check, state, count);
 455}
 456
 457static inline void inbound_primed(struct qdio_q *q, int count)
 458{
 459	int new;
 460
 461	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
 462
 463	/* for QEBSM the ACK was already set by EQBS */
 464	if (is_qebsm(q)) {
 465		if (!q->u.in.polling) {
 466			q->u.in.polling = 1;
 467			q->u.in.ack_count = count;
 468			q->u.in.ack_start = q->first_to_check;
 469			return;
 470		}
 471
  472		/* delete the previous ACKs */
 473		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
 474			       q->u.in.ack_count);
 475		q->u.in.ack_count = count;
 476		q->u.in.ack_start = q->first_to_check;
 477		return;
 478	}
 479
 480	/*
 481	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
 482	 * or by the next inbound run.
 483	 */
 484	new = add_buf(q->first_to_check, count - 1);
 485	if (q->u.in.polling) {
 486		/* reset the previous ACK but first set the new one */
 487		set_buf_state(q, new, SLSB_P_INPUT_ACK);
 488		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 489	} else {
 490		q->u.in.polling = 1;
 491		set_buf_state(q, new, SLSB_P_INPUT_ACK);
 492	}
 493
 494	q->u.in.ack_start = new;
 495	count--;
 496	if (!count)
 497		return;
 498	/* need to change ALL buffers to get more interrupts */
 499	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
 500}
 501
 502static int get_inbound_buffer_frontier(struct qdio_q *q)
 503{
 504	int count, stop;
 505	unsigned char state = 0;
 506
 507	q->timestamp = get_tod_clock_fast();
 508
 509	/*
 510	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
 511	 * would return 0.
 512	 */
 513	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 514	stop = add_buf(q->first_to_check, count);
 515
 516	if (q->first_to_check == stop)
 517		goto out;
 518
 519	/*
  520	 * No siga-sync needed here, as a PCI interrupt or the thin-interrupt
  521	 * handler has already synced the queues.
 522	 */
 523	count = get_buf_states(q, q->first_to_check, &state, count, 1, 0);
 524	if (!count)
 525		goto out;
 526
 527	switch (state) {
 528	case SLSB_P_INPUT_PRIMED:
 529		inbound_primed(q, count);
 530		q->first_to_check = add_buf(q->first_to_check, count);
 531		if (atomic_sub_return(count, &q->nr_buf_used) == 0)
 532			qperf_inc(q, inbound_queue_full);
 533		if (q->irq_ptr->perf_stat_enabled)
 534			account_sbals(q, count);
 535		break;
 536	case SLSB_P_INPUT_ERROR:
 537		process_buffer_error(q, count);
 538		q->first_to_check = add_buf(q->first_to_check, count);
 539		atomic_sub(count, &q->nr_buf_used);
 540		if (q->irq_ptr->perf_stat_enabled)
 541			account_sbals_error(q, count);
 542		break;
 543	case SLSB_CU_INPUT_EMPTY:
 544	case SLSB_P_INPUT_NOT_INIT:
 545	case SLSB_P_INPUT_ACK:
 546		if (q->irq_ptr->perf_stat_enabled)
 547			q->q_stats.nr_sbal_nop++;
 548		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
 549		break;
 550	default:
 551		WARN_ON_ONCE(1);
 552	}
 553out:
 554	return q->first_to_check;
 555}
 556
 557static int qdio_inbound_q_moved(struct qdio_q *q)
 558{
 559	int bufnr;
 560
 561	bufnr = get_inbound_buffer_frontier(q);
 562
 563	if (bufnr != q->last_move) {
 564		q->last_move = bufnr;
 565		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
 566			q->u.in.timestamp = get_tod_clock();
 567		return 1;
 568	} else
 569		return 0;
 570}
 571
 572static inline int qdio_inbound_q_done(struct qdio_q *q)
 573{
 574	unsigned char state = 0;
 575
 576	if (!atomic_read(&q->nr_buf_used))
 577		return 1;
 578
 579	if (need_siga_sync(q))
 580		qdio_siga_sync_q(q);
 581	get_buf_state(q, q->first_to_check, &state, 0);
 582
 583	if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
 584		/* more work coming */
 585		return 0;
 586
 587	if (is_thinint_irq(q->irq_ptr))
 588		return 1;
 589
 590	/* don't poll under z/VM */
 591	if (MACHINE_IS_VM)
 592		return 1;
 593
 594	/*
  595	 * At this point we know that inbound first_to_check
 596	 * has (probably) not moved (see qdio_inbound_processing).
 597	 */
 598	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 599		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 600			      q->first_to_check);
 601		return 1;
 602	} else
 603		return 0;
 604}
 605
 606static inline int contains_aobs(struct qdio_q *q)
 607{
 608	return !q->is_input_q && q->u.out.use_cq;
 609}
 610
 611static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count)
 612{
 613	unsigned char state = 0;
 614	int j, b = start;
 615
 616	if (!contains_aobs(q))
 617		return;
 618
 619	for (j = 0; j < count; ++j) {
 620		get_buf_state(q, b, &state, 0);
 621		if (state == SLSB_P_OUTPUT_PENDING) {
 622			struct qaob *aob = q->u.out.aobs[b];
 623			if (aob == NULL)
 624				continue;
 625
 626			q->u.out.sbal_state[b].flags |=
 627				QDIO_OUTBUF_STATE_FLAG_PENDING;
 628			q->u.out.aobs[b] = NULL;
 629		} else if (state == SLSB_P_OUTPUT_EMPTY) {
 630			q->u.out.sbal_state[b].aob = NULL;
 631		}
 632		b = next_buf(b);
 633	}
 634}
 635
 636static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
 637					int bufnr)
 638{
 639	unsigned long phys_aob = 0;
 640
 641	if (!q->use_cq)
 642		goto out;
 643
 644	if (!q->aobs[bufnr]) {
 645		struct qaob *aob = qdio_allocate_aob();
 646		q->aobs[bufnr] = aob;
 647	}
 648	if (q->aobs[bufnr]) {
 649		q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
 650		q->sbal_state[bufnr].aob = q->aobs[bufnr];
 651		q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
 652		phys_aob = virt_to_phys(q->aobs[bufnr]);
 653		WARN_ON_ONCE(phys_aob & 0xFF);
 654	}
 655
 656out:
 657	return phys_aob;
 658}
 659
 660static void qdio_kick_handler(struct qdio_q *q)
 661{
 662	int start = q->first_to_kick;
 663	int end = q->first_to_check;
 664	int count;
 665
 666	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
 667		return;
 668
 669	count = sub_buf(end, start);
 670
 671	if (q->is_input_q) {
 672		qperf_inc(q, inbound_handler);
 673		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
 674	} else {
 675		qperf_inc(q, outbound_handler);
 676		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
 677			      start, count);
 678	}
 679
 680	qdio_handle_aobs(q, start, count);
 681
 682	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
 683		   q->irq_ptr->int_parm);
 684
 685	/* for the next time */
 686	q->first_to_kick = end;
 687	q->qdio_error = 0;
 688}
 689
 690static void __qdio_inbound_processing(struct qdio_q *q)
 691{
 692	qperf_inc(q, tasklet_inbound);
 693
 694	if (!qdio_inbound_q_moved(q))
 695		return;
 696
 697	qdio_kick_handler(q);
 698
 699	if (!qdio_inbound_q_done(q)) {
 700		/* means poll time is not yet over */
 701		qperf_inc(q, tasklet_inbound_resched);
 702		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
 703			tasklet_schedule(&q->tasklet);
 704			return;
 705		}
 706	}
 707
 708	qdio_stop_polling(q);
 709	/*
 710	 * We need to check again to not lose initiative after
 711	 * resetting the ACK state.
 712	 */
 713	if (!qdio_inbound_q_done(q)) {
 714		qperf_inc(q, tasklet_inbound_resched2);
 715		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
 716			tasklet_schedule(&q->tasklet);
 717	}
 718}
 719
 720void qdio_inbound_processing(unsigned long data)
 721{
 722	struct qdio_q *q = (struct qdio_q *)data;
 723	__qdio_inbound_processing(q);
 724}
 725
 726static int get_outbound_buffer_frontier(struct qdio_q *q)
 727{
 728	int count, stop;
 729	unsigned char state = 0;
 730
 731	q->timestamp = get_tod_clock_fast();
 732
 733	if (need_siga_sync(q))
 734		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
 735		    !pci_out_supported(q)) ||
 736		    (queue_type(q) == QDIO_IQDIO_QFMT &&
 737		    multicast_outbound(q)))
 738			qdio_siga_sync_q(q);
 739
 740	/*
 741	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
 742	 * would return 0.
 743	 */
 744	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
 745	stop = add_buf(q->first_to_check, count);
 746	if (q->first_to_check == stop)
 747		goto out;
 748
 749	count = get_buf_states(q, q->first_to_check, &state, count, 0, 1);
 750	if (!count)
 751		goto out;
 752
 753	switch (state) {
 754	case SLSB_P_OUTPUT_EMPTY:
 755		/* the adapter got it */
 756		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr,
 757			"out empty:%1d %02x", q->nr, count);
 758
 759		atomic_sub(count, &q->nr_buf_used);
 760		q->first_to_check = add_buf(q->first_to_check, count);
 761		if (q->irq_ptr->perf_stat_enabled)
 762			account_sbals(q, count);
 763
 764		break;
 765	case SLSB_P_OUTPUT_ERROR:
 766		process_buffer_error(q, count);
 767		q->first_to_check = add_buf(q->first_to_check, count);
 768		atomic_sub(count, &q->nr_buf_used);
 769		if (q->irq_ptr->perf_stat_enabled)
 770			account_sbals_error(q, count);
 771		break;
 772	case SLSB_CU_OUTPUT_PRIMED:
 773		/* the adapter has not fetched the output yet */
 774		if (q->irq_ptr->perf_stat_enabled)
 775			q->q_stats.nr_sbal_nop++;
 776		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
 777			      q->nr);
 778		break;
 779	case SLSB_P_OUTPUT_NOT_INIT:
 780	case SLSB_P_OUTPUT_HALTED:
 781		break;
 782	default:
 783		WARN_ON_ONCE(1);
 784	}
 785
 786out:
 787	return q->first_to_check;
 788}
 789
 790/* all buffers processed? */
 791static inline int qdio_outbound_q_done(struct qdio_q *q)
 792{
 793	return atomic_read(&q->nr_buf_used) == 0;
 794}
 795
 796static inline int qdio_outbound_q_moved(struct qdio_q *q)
 797{
 798	int bufnr;
 799
 800	bufnr = get_outbound_buffer_frontier(q);
 801
 802	if (bufnr != q->last_move) {
 803		q->last_move = bufnr;
 804		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
 805		return 1;
 806	} else
 807		return 0;
 808}
 809
 810static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
 811{
 812	int retries = 0, cc;
 813	unsigned int busy_bit;
 814
 815	if (!need_siga_out(q))
 816		return 0;
 817
 818	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
 819retry:
 820	qperf_inc(q, siga_write);
 821
 822	cc = qdio_siga_output(q, &busy_bit, aob);
 823	switch (cc) {
 824	case 0:
 825		break;
 826	case 2:
 827		if (busy_bit) {
 828			while (++retries < QDIO_BUSY_BIT_RETRIES) {
 829				mdelay(QDIO_BUSY_BIT_RETRY_DELAY);
 830				goto retry;
 831			}
 832			DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr);
 833			cc = -EBUSY;
 834		} else {
 835			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
 836			cc = -ENOBUFS;
 837		}
 838		break;
 839	case 1:
 840	case 3:
 841		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
 842		cc = -EIO;
 843		break;
 844	}
 845	if (retries) {
 846		DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr);
 847		DBF_ERROR("count:%u", retries);
 848	}
 849	return cc;
 850}
 851
 852static void __qdio_outbound_processing(struct qdio_q *q)
 853{
 854	qperf_inc(q, tasklet_outbound);
 855	WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0);
 856
 857	if (qdio_outbound_q_moved(q))
 858		qdio_kick_handler(q);
 859
 860	if (queue_type(q) == QDIO_ZFCP_QFMT)
 861		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
 862			goto sched;
 863
 864	if (q->u.out.pci_out_enabled)
 865		return;
 866
 867	/*
  868	 * Now we know that the queue type is either qeth without PCI enabled
  869	 * or HiperSockets. Make sure the buffer switch from PRIMED to EMPTY
 870	 * is noticed and outbound_handler is called after some time.
 871	 */
 872	if (qdio_outbound_q_done(q))
 873		del_timer(&q->u.out.timer);
 874	else
 875		if (!timer_pending(&q->u.out.timer))
 876			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
 877	return;
 878
 879sched:
 880	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 881		return;
 882	tasklet_schedule(&q->tasklet);
 883}
 884
 885/* outbound tasklet */
 886void qdio_outbound_processing(unsigned long data)
 887{
 888	struct qdio_q *q = (struct qdio_q *)data;
 889	__qdio_outbound_processing(q);
 890}
 891
 892void qdio_outbound_timer(unsigned long data)
 893{
 894	struct qdio_q *q = (struct qdio_q *)data;
 895
 896	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 897		return;
 898	tasklet_schedule(&q->tasklet);
 899}
 900
 901static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 902{
 903	struct qdio_q *out;
 904	int i;
 905
 906	if (!pci_out_supported(q))
 907		return;
 908
 909	for_each_output_queue(q->irq_ptr, out, i)
 910		if (!qdio_outbound_q_done(out))
 911			tasklet_schedule(&out->tasklet);
 912}
 913
 914static void __tiqdio_inbound_processing(struct qdio_q *q)
 915{
 916	qperf_inc(q, tasklet_inbound);
 917	if (need_siga_sync(q) && need_siga_sync_after_ai(q))
 918		qdio_sync_queues(q);
 919
 920	/*
 921	 * The interrupt could be caused by a PCI request. Check the
 922	 * PCI capable outbound queues.
 923	 */
 924	qdio_check_outbound_after_thinint(q);
 925
 926	if (!qdio_inbound_q_moved(q))
 927		return;
 928
 929	qdio_kick_handler(q);
 930
 931	if (!qdio_inbound_q_done(q)) {
 932		qperf_inc(q, tasklet_inbound_resched);
 933		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
 934			tasklet_schedule(&q->tasklet);
 935			return;
 936		}
 937	}
 938
 939	qdio_stop_polling(q);
 940	/*
 941	 * We need to check again to not lose initiative after
 942	 * resetting the ACK state.
 943	 */
 944	if (!qdio_inbound_q_done(q)) {
 945		qperf_inc(q, tasklet_inbound_resched2);
 946		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
 947			tasklet_schedule(&q->tasklet);
 948	}
 949}
 950
 951void tiqdio_inbound_processing(unsigned long data)
 952{
 953	struct qdio_q *q = (struct qdio_q *)data;
 954	__tiqdio_inbound_processing(q);
 955}
 956
 957static inline void qdio_set_state(struct qdio_irq *irq_ptr,
 958				  enum qdio_irq_states state)
 959{
 960	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);
 961
 962	irq_ptr->state = state;
 963	mb();
 964}
 965
 966static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
 967{
 968	if (irb->esw.esw0.erw.cons) {
 969		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
 970		DBF_ERROR_HEX(irb, 64);
 971		DBF_ERROR_HEX(irb->ecw, 64);
 972	}
 973}
 974
 975/* PCI interrupt handler */
 976static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 977{
 978	int i;
 979	struct qdio_q *q;
 980
 981	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 982		return;
 983
 984	for_each_input_queue(irq_ptr, q, i) {
 985		if (q->u.in.queue_start_poll) {
 986			/* skip if polling is enabled or already in work */
 987			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
 988				     &q->u.in.queue_irq_state)) {
 989				qperf_inc(q, int_discarded);
 990				continue;
 991			}
 992			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
 993						 q->irq_ptr->int_parm);
 994		} else {
 995			tasklet_schedule(&q->tasklet);
 996		}
 997	}
 998
 999	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
1000		return;
1001
1002	for_each_output_queue(irq_ptr, q, i) {
1003		if (qdio_outbound_q_done(q))
1004			continue;
1005		if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
1006			qdio_siga_sync_q(q);
1007		tasklet_schedule(&q->tasklet);
1008	}
1009}
1010
1011static void qdio_handle_activate_check(struct ccw_device *cdev,
1012				unsigned long intparm, int cstat, int dstat)
1013{
1014	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1015	struct qdio_q *q;
1016	int count;
1017
1018	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
1019	DBF_ERROR("intp :%lx", intparm);
1020	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
1021
1022	if (irq_ptr->nr_input_qs) {
1023		q = irq_ptr->input_qs[0];
1024	} else if (irq_ptr->nr_output_qs) {
1025		q = irq_ptr->output_qs[0];
1026	} else {
1027		dump_stack();
1028		goto no_handler;
1029	}
1030
1031	count = sub_buf(q->first_to_check, q->first_to_kick);
1032	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE,
1033		   q->nr, q->first_to_kick, count, irq_ptr->int_parm);
1034no_handler:
1035	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1036	/*
 1037	 * In case of z/VM LGR (Live Guest Relocation) QDIO recovery will happen.
1038	 * Therefore we call the LGR detection function here.
1039	 */
1040	lgr_info_log();
1041}
1042
1043static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
1044				      int dstat)
1045{
1046	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1047
1048	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
1049
1050	if (cstat)
1051		goto error;
1052	if (dstat & ~(DEV_STAT_DEV_END | DEV_STAT_CHN_END))
1053		goto error;
1054	if (!(dstat & DEV_STAT_DEV_END))
1055		goto error;
1056	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
1057	return;
1058
1059error:
1060	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
1061	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);
1062	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1063}
1064
1065/* qdio interrupt handler */
1066void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
1067		      struct irb *irb)
1068{
1069	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1070	int cstat, dstat;
1071
1072	if (!intparm || !irq_ptr) {
1073		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
1074		return;
1075	}
1076
1077	if (irq_ptr->perf_stat_enabled)
1078		irq_ptr->perf_stat.qdio_int++;
1079
1080	if (IS_ERR(irb)) {
1081		DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
1082		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
1083		wake_up(&cdev->private->wait_q);
1084		return;
1085	}
1086	qdio_irq_check_sense(irq_ptr, irb);
1087	cstat = irb->scsw.cmd.cstat;
1088	dstat = irb->scsw.cmd.dstat;
1089
1090	switch (irq_ptr->state) {
1091	case QDIO_IRQ_STATE_INACTIVE:
1092		qdio_establish_handle_irq(cdev, cstat, dstat);
1093		break;
1094	case QDIO_IRQ_STATE_CLEANUP:
1095		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1096		break;
1097	case QDIO_IRQ_STATE_ESTABLISHED:
1098	case QDIO_IRQ_STATE_ACTIVE:
1099		if (cstat & SCHN_STAT_PCI) {
1100			qdio_int_handler_pci(irq_ptr);
1101			return;
1102		}
1103		if (cstat || dstat)
1104			qdio_handle_activate_check(cdev, intparm, cstat,
1105						   dstat);
1106		break;
1107	case QDIO_IRQ_STATE_STOPPED:
1108		break;
1109	default:
1110		WARN_ON_ONCE(1);
1111	}
1112	wake_up(&cdev->private->wait_q);
1113}
1114
1115/**
1116 * qdio_get_ssqd_desc - get qdio subchannel description
1117 * @cdev: ccw device to get description for
1118 * @data: where to store the ssqd
1119 *
1120 * Returns 0 or an error code. The results of the chsc are stored in the
1121 * specified structure.
1122 */
1123int qdio_get_ssqd_desc(struct ccw_device *cdev,
1124		       struct qdio_ssqd_desc *data)
1125{
1126
1127	if (!cdev || !cdev->private)
1128		return -EINVAL;
1129
1130	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
1131	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
1132}
1133EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
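
/*
 * Illustrative usage sketch (hypothetical caller, not part of this driver):
 * an upper-layer driver that owns the ccw_device can query the SSQD like
 * this; "ssqd" and the surrounding error handling are assumptions.
 *
 *	struct qdio_ssqd_desc ssqd;
 *	int rc;
 *
 *	rc = qdio_get_ssqd_desc(cdev, &ssqd);
 *	if (rc)
 *		return rc;
 */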
1134
1135static void qdio_shutdown_queues(struct ccw_device *cdev)
1136{
1137	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1138	struct qdio_q *q;
1139	int i;
1140
1141	for_each_input_queue(irq_ptr, q, i)
1142		tasklet_kill(&q->tasklet);
1143
1144	for_each_output_queue(irq_ptr, q, i) {
1145		del_timer(&q->u.out.timer);
1146		tasklet_kill(&q->tasklet);
1147	}
1148}
1149
1150/**
1151 * qdio_shutdown - shut down a qdio subchannel
1152 * @cdev: associated ccw device
1153 * @how: use halt or clear to shutdown
1154 */
1155int qdio_shutdown(struct ccw_device *cdev, int how)
1156{
1157	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1158	int rc;
1159	unsigned long flags;
1160
1161	if (!irq_ptr)
1162		return -ENODEV;
1163
1164	WARN_ON_ONCE(irqs_disabled());
1165	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);
1166
1167	mutex_lock(&irq_ptr->setup_mutex);
1168	/*
1169	 * Subchannel was already shot down. We cannot prevent being called
1170	 * twice since cio may trigger a shutdown asynchronously.
1171	 */
1172	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1173		mutex_unlock(&irq_ptr->setup_mutex);
1174		return 0;
1175	}
1176
1177	/*
1178	 * Indicate that the device is going down. Scheduling the queue
1179	 * tasklets is forbidden from here on.
1180	 */
1181	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
1182
1183	tiqdio_remove_input_queues(irq_ptr);
1184	qdio_shutdown_queues(cdev);
1185	qdio_shutdown_debug_entries(irq_ptr);
1186
1187	/* cleanup subchannel */
1188	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1189
1190	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
1191		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
1192	else
1193		/* default behaviour is halt */
1194		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
1195	if (rc) {
1196		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
1197		DBF_ERROR("rc:%4d", rc);
1198		goto no_cleanup;
1199	}
1200
1201	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
1202	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1203	wait_event_interruptible_timeout(cdev->private->wait_q,
1204		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
1205		irq_ptr->state == QDIO_IRQ_STATE_ERR,
1206		10 * HZ);
1207	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
1208
1209no_cleanup:
1210	qdio_shutdown_thinint(irq_ptr);
1211
1212	/* restore interrupt handler */
1213	if ((void *)cdev->handler == (void *)qdio_int_handler)
1214		cdev->handler = irq_ptr->orig_handler;
1215	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
1216
1217	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1218	mutex_unlock(&irq_ptr->setup_mutex);
1219	if (rc)
1220		return rc;
1221	return 0;
1222}
1223EXPORT_SYMBOL_GPL(qdio_shutdown);
1224
1225/**
1226 * qdio_free - free data structures for a qdio subchannel
1227 * @cdev: associated ccw device
1228 */
1229int qdio_free(struct ccw_device *cdev)
1230{
1231	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1232
1233	if (!irq_ptr)
1234		return -ENODEV;
1235
1236	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
1237	mutex_lock(&irq_ptr->setup_mutex);
1238
1239	if (irq_ptr->debug_area != NULL) {
1240		debug_unregister(irq_ptr->debug_area);
1241		irq_ptr->debug_area = NULL;
1242	}
1243	cdev->private->qdio_data = NULL;
1244	mutex_unlock(&irq_ptr->setup_mutex);
1245
1246	qdio_release_memory(irq_ptr);
1247	return 0;
1248}
1249EXPORT_SYMBOL_GPL(qdio_free);
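
/*
 * Illustrative teardown sketch (hypothetical caller): the subchannel is
 * expected to be shut down before the qdio data structures are released.
 *
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */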
1250
1251/**
1252 * qdio_allocate - allocate qdio queues and associated data
1253 * @init_data: initialization data
1254 */
1255int qdio_allocate(struct qdio_initialize *init_data)
1256{
1257	struct qdio_irq *irq_ptr;
1258
1259	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);
1260
1261	if ((init_data->no_input_qs && !init_data->input_handler) ||
1262	    (init_data->no_output_qs && !init_data->output_handler))
1263		return -EINVAL;
1264
1265	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
1266	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
1267		return -EINVAL;
1268
1269	if ((!init_data->input_sbal_addr_array) ||
1270	    (!init_data->output_sbal_addr_array))
1271		return -EINVAL;
1272
1273	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
1274	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1275	if (!irq_ptr)
1276		goto out_err;
1277
1278	mutex_init(&irq_ptr->setup_mutex);
1279	qdio_allocate_dbf(init_data, irq_ptr);
1280
1281	/*
1282	 * Allocate a page for the chsc calls in qdio_establish.
1283	 * Must be pre-allocated since a zfcp recovery will call
1284	 * qdio_establish. In case of low memory and swap on a zfcp disk
1285	 * we may not be able to allocate memory otherwise.
1286	 */
1287	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
1288	if (!irq_ptr->chsc_page)
1289		goto out_rel;
1290
1291	/* qdr is used in ccw1.cda which is u32 */
1292	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
1293	if (!irq_ptr->qdr)
1294		goto out_rel;
1295
1296	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
1297			     init_data->no_output_qs))
1298		goto out_rel;
1299
1300	init_data->cdev->private->qdio_data = irq_ptr;
1301	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
1302	return 0;
1303out_rel:
1304	qdio_release_memory(irq_ptr);
1305out_err:
1306	return -ENOMEM;
1307}
1308EXPORT_SYMBOL_GPL(qdio_allocate);
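
/*
 * Illustrative setup sketch (hypothetical caller): only the members that
 * qdio_allocate() itself validates are shown; my_input_handler,
 * my_output_handler, my_in_sbals and my_out_sbals are assumptions, and a
 * real driver fills in the remaining struct qdio_initialize members too.
 *
 *	struct qdio_initialize init_data = {};
 *
 *	init_data.cdev = cdev;
 *	init_data.no_input_qs = 1;
 *	init_data.no_output_qs = 1;
 *	init_data.input_handler = my_input_handler;
 *	init_data.output_handler = my_output_handler;
 *	init_data.input_sbal_addr_array = my_in_sbals;
 *	init_data.output_sbal_addr_array = my_out_sbals;
 *
 *	rc = qdio_allocate(&init_data);
 */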
1309
1310static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
1311{
1312	struct qdio_q *q = irq_ptr->input_qs[0];
1313	int i, use_cq = 0;
1314
1315	if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT)
1316		use_cq = 1;
1317
1318	for_each_output_queue(irq_ptr, q, i) {
1319		if (use_cq) {
1320			if (qdio_enable_async_operation(&q->u.out) < 0) {
1321				use_cq = 0;
1322				continue;
1323			}
1324		} else
1325			qdio_disable_async_operation(&q->u.out);
1326	}
1327	DBF_EVENT("use_cq:%d", use_cq);
1328}
1329
1330/**
1331 * qdio_establish - establish queues on a qdio subchannel
1332 * @init_data: initialization data
1333 */
1334int qdio_establish(struct qdio_initialize *init_data)
1335{
1336	struct qdio_irq *irq_ptr;
1337	struct ccw_device *cdev = init_data->cdev;
1338	unsigned long saveflags;
1339	int rc;
1340
1341	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);
1342
1343	irq_ptr = cdev->private->qdio_data;
1344	if (!irq_ptr)
1345		return -ENODEV;
1346
1347	if (cdev->private->state != DEV_STATE_ONLINE)
1348		return -EINVAL;
1349
1350	mutex_lock(&irq_ptr->setup_mutex);
1351	qdio_setup_irq(init_data);
1352
1353	rc = qdio_establish_thinint(irq_ptr);
1354	if (rc) {
1355		mutex_unlock(&irq_ptr->setup_mutex);
1356		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1357		return rc;
1358	}
1359
1360	/* establish q */
1361	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
1362	irq_ptr->ccw.flags = CCW_FLAG_SLI;
1363	irq_ptr->ccw.count = irq_ptr->equeue.count;
1364	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);
1365
1366	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1367	ccw_device_set_options_mask(cdev, 0);
1368
1369	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
1370	if (rc) {
1371		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
1372		DBF_ERROR("rc:%4x", rc);
1373	}
1374	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1375
1376	if (rc) {
1377		mutex_unlock(&irq_ptr->setup_mutex);
1378		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1379		return rc;
1380	}
1381
1382	wait_event_interruptible_timeout(cdev->private->wait_q,
1383		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
1384		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);
1385
1386	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
1387		mutex_unlock(&irq_ptr->setup_mutex);
1388		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
1389		return -EIO;
1390	}
1391
1392	qdio_setup_ssqd_info(irq_ptr);
1393
1394	qdio_detect_hsicq(irq_ptr);
1395
 1396	/* QEBSM is now set up if available, initialize buffer states */
1397	qdio_init_buf_states(irq_ptr);
1398
1399	mutex_unlock(&irq_ptr->setup_mutex);
1400	qdio_print_subchannel_info(irq_ptr, cdev);
1401	qdio_setup_debug_entries(irq_ptr, cdev);
1402	return 0;
1403}
1404EXPORT_SYMBOL_GPL(qdio_establish);
1405
1406/**
1407 * qdio_activate - activate queues on a qdio subchannel
1408 * @cdev: associated cdev
1409 */
1410int qdio_activate(struct ccw_device *cdev)
1411{
1412	struct qdio_irq *irq_ptr;
1413	int rc;
1414	unsigned long saveflags;
1415
1416	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);
1417
1418	irq_ptr = cdev->private->qdio_data;
1419	if (!irq_ptr)
1420		return -ENODEV;
1421
1422	if (cdev->private->state != DEV_STATE_ONLINE)
1423		return -EINVAL;
1424
1425	mutex_lock(&irq_ptr->setup_mutex);
1426	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
1427		rc = -EBUSY;
1428		goto out;
1429	}
1430
1431	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
1432	irq_ptr->ccw.flags = CCW_FLAG_SLI;
1433	irq_ptr->ccw.count = irq_ptr->aqueue.count;
1434	irq_ptr->ccw.cda = 0;
1435
1436	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
1437	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
1438
1439	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
1440			      0, DOIO_DENY_PREFETCH);
1441	if (rc) {
1442		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
1443		DBF_ERROR("rc:%4x", rc);
1444	}
1445	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);
1446
1447	if (rc)
1448		goto out;
1449
1450	if (is_thinint_irq(irq_ptr))
1451		tiqdio_add_input_queues(irq_ptr);
1452
1453	/* wait for subchannel to become active */
1454	msleep(5);
1455
1456	switch (irq_ptr->state) {
1457	case QDIO_IRQ_STATE_STOPPED:
1458	case QDIO_IRQ_STATE_ERR:
1459		rc = -EIO;
1460		break;
1461	default:
1462		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
1463		rc = 0;
1464	}
1465out:
1466	mutex_unlock(&irq_ptr->setup_mutex);
1467	return rc;
1468}
1469EXPORT_SYMBOL_GPL(qdio_activate);
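
/*
 * Illustrative bring-up sketch (hypothetical caller): after a successful
 * qdio_allocate(), the same init_data is used to establish the queues and
 * the subchannel is then activated; the error labels are assumptions.
 *
 *	rc = qdio_establish(&init_data);
 *	if (rc)
 *		goto out_free;
 *	rc = qdio_activate(cdev);
 *	if (rc)
 *		goto out_shutdown;
 */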
1470
1471static inline int buf_in_between(int bufnr, int start, int count)
1472{
1473	int end = add_buf(start, count);
1474
1475	if (end > start) {
1476		if (bufnr >= start && bufnr < end)
1477			return 1;
1478		else
1479			return 0;
1480	}
1481
1482	/* wrap-around case */
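	/* e.g. start 120, count 16: end wraps to 8, so 120..127 and 0..7 match */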
1483	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
1484	    (bufnr < end))
1485		return 1;
1486	else
1487		return 0;
1488}
1489
1490/**
1491 * handle_inbound - reset processed input buffers
1492 * @q: queue containing the buffers
1493 * @callflags: flags
1494 * @bufnr: first buffer to process
1495 * @count: how many buffers are emptied
1496 */
1497static int handle_inbound(struct qdio_q *q, unsigned int callflags,
1498			  int bufnr, int count)
1499{
1500	int diff;
1501
1502	qperf_inc(q, inbound_call);
1503
1504	if (!q->u.in.polling)
1505		goto set;
1506
1507	/* protect against stop polling setting an ACK for an emptied slsb */
1508	if (count == QDIO_MAX_BUFFERS_PER_Q) {
1509		/* overwriting everything, just delete polling status */
1510		q->u.in.polling = 0;
1511		q->u.in.ack_count = 0;
1512		goto set;
1513	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
1514		if (is_qebsm(q)) {
1515			/* partial overwrite, just update ack_start */
1516			diff = add_buf(bufnr, count);
1517			diff = sub_buf(diff, q->u.in.ack_start);
1518			q->u.in.ack_count -= diff;
1519			if (q->u.in.ack_count <= 0) {
1520				q->u.in.polling = 0;
1521				q->u.in.ack_count = 0;
1522				goto set;
1523			}
1524			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
1525		}
1526		else
1527			/* the only ACK will be deleted, so stop polling */
1528			q->u.in.polling = 0;
1529	}
1530
1531set:
1532	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
1533	atomic_add(count, &q->nr_buf_used);
1534
1535	if (need_siga_in(q))
1536		return qdio_siga_input(q);
1537
1538	return 0;
1539}
1540
1541/**
1542 * handle_outbound - process filled outbound buffers
1543 * @q: queue containing the buffers
1544 * @callflags: flags
1545 * @bufnr: first buffer to process
1546 * @count: how many buffers are filled
1547 */
1548static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1549			   int bufnr, int count)
1550{
1551	unsigned char state = 0;
1552	int used, rc = 0;
1553
1554	qperf_inc(q, outbound_call);
1555
1556	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
1557	used = atomic_add_return(count, &q->nr_buf_used);
1558
1559	if (used == QDIO_MAX_BUFFERS_PER_Q)
1560		qperf_inc(q, outbound_queue_full);
1561
1562	if (callflags & QDIO_FLAG_PCI_OUT) {
1563		q->u.out.pci_out_enabled = 1;
1564		qperf_inc(q, pci_request_int);
1565	} else
1566		q->u.out.pci_out_enabled = 0;
1567
1568	if (queue_type(q) == QDIO_IQDIO_QFMT) {
1569		unsigned long phys_aob = 0;
1570
1571		/* One SIGA-W per buffer required for unicast HSI */
1572		WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1573
1574		phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
1575
1576		rc = qdio_kick_outbound_q(q, phys_aob);
1577	} else if (need_siga_sync(q)) {
1578		rc = qdio_siga_sync_q(q);
1579	} else {
1580		/* try to fast requeue buffers */
1581		get_buf_state(q, prev_buf(bufnr), &state, 0);
1582		if (state != SLSB_CU_OUTPUT_PRIMED)
1583			rc = qdio_kick_outbound_q(q, 0);
1584		else
1585			qperf_inc(q, fast_requeue);
1586	}
1587
1588	/* in case of SIGA errors we must process the error immediately */
1589	if (used >= q->u.out.scan_threshold || rc)
1590		tasklet_schedule(&q->tasklet);
1591	else
1592		/* free the SBALs in case of no further traffic */
1593		if (!timer_pending(&q->u.out.timer))
1594			mod_timer(&q->u.out.timer, jiffies + HZ);
1595	return rc;
1596}
1597
1598/**
1599 * do_QDIO - process input or output buffers
1600 * @cdev: associated ccw_device for the qdio subchannel
1601 * @callflags: input or output and special flags from the program
1602 * @q_nr: queue number
1603 * @bufnr: buffer number
1604 * @count: how many buffers to process
1605 */
1606int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1607	    int q_nr, unsigned int bufnr, unsigned int count)
1608{
1609	struct qdio_irq *irq_ptr;
1610
1611	if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
1612		return -EINVAL;
1613
1614	irq_ptr = cdev->private->qdio_data;
1615	if (!irq_ptr)
1616		return -ENODEV;
1617
1618	DBF_DEV_EVENT(DBF_INFO, irq_ptr,
1619		      "do%02x b:%02x c:%02x", callflags, bufnr, count);
1620
1621	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1622		return -EIO;
1623	if (!count)
1624		return 0;
1625	if (callflags & QDIO_FLAG_SYNC_INPUT)
1626		return handle_inbound(irq_ptr->input_qs[q_nr],
1627				      callflags, bufnr, count);
1628	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
1629		return handle_outbound(irq_ptr->output_qs[q_nr],
1630				       callflags, bufnr, count);
1631	return -EINVAL;
1632}
1633EXPORT_SYMBOL_GPL(do_QDIO);
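
/*
 * Illustrative usage sketch (hypothetical caller): returning one emptied
 * buffer of input queue 0 to the adapter, and submitting one filled buffer
 * on output queue 0 with a PCI interrupt requested; "bufnr" is the buffer
 * the driver just processed or filled.
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, 1);
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_PCI_OUT,
 *		     0, bufnr, 1);
 */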
1634
1635/**
1636 * qdio_start_irq - process input buffers
1637 * @cdev: associated ccw_device for the qdio subchannel
1638 * @nr: input queue number
1639 *
1640 * Return codes
1641 *   0 - success
1642 *   1 - irqs not started since new data is available
1643 */
1644int qdio_start_irq(struct ccw_device *cdev, int nr)
1645{
1646	struct qdio_q *q;
1647	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1648
1649	if (!irq_ptr)
1650		return -ENODEV;
1651	q = irq_ptr->input_qs[nr];
1652
1653	clear_nonshared_ind(irq_ptr);
1654	qdio_stop_polling(q);
1655	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1656
1657	/*
1658	 * We need to check again to not lose initiative after
1659	 * resetting the ACK state.
1660	 */
1661	if (test_nonshared_ind(irq_ptr))
1662		goto rescan;
1663	if (!qdio_inbound_q_done(q))
1664		goto rescan;
1665	return 0;
1666
1667rescan:
1668	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1669			     &q->u.in.queue_irq_state))
1670		return 0;
1671	else
1672		return 1;
1673
1674}
1675EXPORT_SYMBOL(qdio_start_irq);
1676
1677/**
1678 * qdio_get_next_buffers - process input buffers
1679 * @cdev: associated ccw_device for the qdio subchannel
1680 * @nr: input queue number
1681 * @bufnr: first filled buffer number
1682 * @error: buffers are in error state
1683 *
1684 * Return codes
1685 *   < 0 - error
1686 *   = 0 - no new buffers found
1687 *   > 0 - number of processed buffers
1688 */
1689int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1690			  int *error)
1691{
1692	struct qdio_q *q;
1693	int start, end;
1694	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1695
1696	if (!irq_ptr)
1697		return -ENODEV;
1698	q = irq_ptr->input_qs[nr];
1699
1700	/*
1701	 * Cannot rely on automatic sync after interrupt since queues may
1702	 * also be examined without interrupt.
1703	 */
1704	if (need_siga_sync(q))
1705		qdio_sync_queues(q);
1706
1707	/* check the PCI capable outbound queues. */
1708	qdio_check_outbound_after_thinint(q);
1709
1710	if (!qdio_inbound_q_moved(q))
1711		return 0;
1712
1713	/* Note: upper-layer MUST stop processing immediately here ... */
1714	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1715		return -EIO;
1716
1717	start = q->first_to_kick;
1718	end = q->first_to_check;
1719	*bufnr = start;
1720	*error = q->qdio_error;
1721
1722	/* for the next time */
1723	q->first_to_kick = end;
1724	q->qdio_error = 0;
1725	return sub_buf(end, start);
1726}
1727EXPORT_SYMBOL(qdio_get_next_buffers);
1728
1729/**
1730 * qdio_stop_irq - disable interrupt processing for the device
1731 * @cdev: associated ccw_device for the qdio subchannel
1732 * @nr: input queue number
1733 *
1734 * Return codes
1735 *   0 - interrupts were already disabled
1736 *   1 - interrupts successfully disabled
1737 */
1738int qdio_stop_irq(struct ccw_device *cdev, int nr)
1739{
1740	struct qdio_q *q;
1741	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1742
1743	if (!irq_ptr)
1744		return -ENODEV;
1745	q = irq_ptr->input_qs[nr];
1746
1747	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1748			     &q->u.in.queue_irq_state))
1749		return 0;
1750	else
1751		return 1;
1752}
1753EXPORT_SYMBOL(qdio_stop_irq);
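
/*
 * Illustrative polling-mode sketch (hypothetical poll loop): with queue
 * interrupts disabled the driver drains buffers via qdio_get_next_buffers(),
 * then re-enables interrupts with qdio_start_irq(); a return value of 1
 * means new data arrived in the meantime, so polling resumes.
 * my_process_buffers() and the poll_again label are assumptions.
 *
 *	do {
 *		n = qdio_get_next_buffers(cdev, 0, &bufnr, &error);
 *		if (n > 0)
 *			my_process_buffers(bufnr, n, error);
 *	} while (n > 0);
 *
 *	if (qdio_start_irq(cdev, 0) == 1)
 *		goto poll_again;
 */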
1754
1755/**
1756 * qdio_pnso_brinfo() - perform network subchannel op #0 - bridge info.
1757 * @schid:		Subchannel ID.
1758 * @cnc:		Boolean Change-Notification Control
1759 * @response:		Response code will be stored at this address
 1760 * @cb:		Callback function that will be executed for each element
 1761 *			of the address list; it is passed @priv, the type of the
 1762 *			address entry and the entry containing the address of
 1763 *			that type
 1764 * @priv:		Pointer passed unchanged from the caller of
 1765 *			qdio_pnso_brinfo() to every invocation of @cb.
1766 *
1767 * Performs "Store-network-bridging-information list" operation and calls
1768 * the callback function for every entry in the list. If "change-
1769 * notification-control" is set, further changes in the address list
1770 * will be reported via the IPA command.
1771 */
1772int qdio_pnso_brinfo(struct subchannel_id schid,
1773		int cnc, u16 *response,
1774		void (*cb)(void *priv, enum qdio_brinfo_entry_type type,
1775				void *entry),
1776		void *priv)
1777{
1778	struct chsc_pnso_area *rr;
1779	int rc;
1780	u32 prev_instance = 0;
1781	int isfirstblock = 1;
1782	int i, size, elems;
1783
1784	rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
1785	if (rr == NULL)
1786		return -ENOMEM;
1787	do {
1788		/* on the first iteration, naihdr.resume_token will be zero */
1789		rc = chsc_pnso_brinfo(schid, rr, rr->naihdr.resume_token, cnc);
1790		if (rc != 0 && rc != -EBUSY)
1791			goto out;
1792		if (rr->response.code != 1) {
1793			rc = -EIO;
1794			continue;
1795		} else
1796			rc = 0;
1797
1798		if (cb == NULL)
1799			continue;
1800
1801		size = rr->naihdr.naids;
1802		elems = (rr->response.length -
1803				sizeof(struct chsc_header) -
1804				sizeof(struct chsc_brinfo_naihdr)) /
1805				size;
1806
1807		if (!isfirstblock && (rr->naihdr.instance != prev_instance)) {
1808			/* Inform the caller that they need to scrap */
1809			/* the data that was already reported via cb */
 1810			rc = -EAGAIN;
 1811			break;
1812		}
1813		isfirstblock = 0;
1814		prev_instance = rr->naihdr.instance;
1815		for (i = 0; i < elems; i++)
1816			switch (size) {
1817			case sizeof(struct qdio_brinfo_entry_l3_ipv6):
1818				(*cb)(priv, l3_ipv6_addr,
1819						&rr->entries.l3_ipv6[i]);
1820				break;
1821			case sizeof(struct qdio_brinfo_entry_l3_ipv4):
1822				(*cb)(priv, l3_ipv4_addr,
1823						&rr->entries.l3_ipv4[i]);
1824				break;
1825			case sizeof(struct qdio_brinfo_entry_l2):
1826				(*cb)(priv, l2_addr_lnid,
1827						&rr->entries.l2[i]);
1828				break;
1829			default:
1830				WARN_ON_ONCE(1);
1831				rc = -EIO;
1832				goto out;
1833			}
1834	} while (rr->response.code == 0x0107 ||  /* channel busy */
1835		  (rr->response.code == 1 && /* list stored */
1836		   /* resume token is non-zero => list incomplete */
1837		   (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2)));
1838	(*response) = rr->response.code;
1839
1840out:
1841	free_page((unsigned long)rr);
1842	return rc;
1843}
1844EXPORT_SYMBOL_GPL(qdio_pnso_brinfo);
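
/*
 * Illustrative usage sketch (hypothetical caller): the callback runs once
 * per reported address entry and must interpret "entry" according to
 * "type"; my_brinfo_cb, handle_l2 and my_priv are assumptions.
 *
 *	static void my_brinfo_cb(void *priv, enum qdio_brinfo_entry_type type,
 *				 void *entry)
 *	{
 *		if (type == l2_addr_lnid)
 *			handle_l2(priv, (struct qdio_brinfo_entry_l2 *)entry);
 *	}
 *
 *	u16 response;
 *	int rc = qdio_pnso_brinfo(schid, 1, &response, my_brinfo_cb, my_priv);
 */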
1845
1846static int __init init_QDIO(void)
1847{
1848	int rc;
1849
1850	rc = qdio_debug_init();
1851	if (rc)
1852		return rc;
1853	rc = qdio_setup_init();
1854	if (rc)
1855		goto out_debug;
1856	rc = tiqdio_allocate_memory();
1857	if (rc)
1858		goto out_cache;
1859	rc = tiqdio_register_thinints();
1860	if (rc)
1861		goto out_ti;
1862	return 0;
1863
1864out_ti:
1865	tiqdio_free_memory();
1866out_cache:
1867	qdio_setup_exit();
1868out_debug:
1869	qdio_debug_exit();
1870	return rc;
1871}
1872
1873static void __exit exit_QDIO(void)
1874{
1875	tiqdio_unregister_thinints();
1876	tiqdio_free_memory();
1877	qdio_setup_exit();
1878	qdio_debug_exit();
1879}
1880
1881module_init(init_QDIO);
1882module_exit(exit_QDIO);