/*
 * Copyright IBM Corp. 2001, 2007
 * Authors:	Fritz Elfert (felfert@millenux.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 *	MPC additions :
 *		Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"

#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"

const char *dev_state_names[] = {
	[DEV_STATE_STOPPED] = "Stopped",
	[DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
	[DEV_STATE_STARTWAIT_RX] = "StartWait RX",
	[DEV_STATE_STARTWAIT_TX] = "StartWait TX",
	[DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
	[DEV_STATE_STOPWAIT_RX] = "StopWait RX",
	[DEV_STATE_STOPWAIT_TX] = "StopWait TX",
	[DEV_STATE_RUNNING] = "Running",
};

const char *dev_event_names[] = {
	[DEV_EVENT_START] = "Start",
	[DEV_EVENT_STOP] = "Stop",
	[DEV_EVENT_RXUP] = "RX up",
	[DEV_EVENT_TXUP] = "TX up",
	[DEV_EVENT_RXDOWN] = "RX down",
	[DEV_EVENT_TXDOWN] = "TX down",
	[DEV_EVENT_RESTART] = "Restart",
};

const char *ctc_ch_event_names[] = {
	[CTC_EVENT_IO_SUCCESS] = "ccw_device success",
	[CTC_EVENT_IO_EBUSY] = "ccw_device busy",
	[CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
	[CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
	[CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
	[CTC_EVENT_ATTN] = "Status ATTN",
	[CTC_EVENT_BUSY] = "Status BUSY",
	[CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
	[CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
	[CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
	[CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
	[CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
	[CTC_EVENT_UC_ZERO] = "Unit check ZERO",
	[CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
	[CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
	[CTC_EVENT_MC_FAIL] = "Machine check failure",
	[CTC_EVENT_MC_GOOD] = "Machine check operational",
	[CTC_EVENT_IRQ] = "IRQ normal",
	[CTC_EVENT_FINSTAT] = "IRQ final",
	[CTC_EVENT_TIMER] = "Timer",
	[CTC_EVENT_START] = "Start",
	[CTC_EVENT_STOP] = "Stop",
	/*
	 * additional MPC events
	 */
	[CTC_EVENT_SEND_XID] = "XID Exchange",
	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};

const char *ctc_ch_state_names[] = {
	[CTC_STATE_IDLE] = "Idle",
	[CTC_STATE_STOPPED] = "Stopped",
	[CTC_STATE_STARTWAIT] = "StartWait",
	[CTC_STATE_STARTRETRY] = "StartRetry",
	[CTC_STATE_SETUPWAIT] = "SetupWait",
	[CTC_STATE_RXINIT] = "RX init",
	[CTC_STATE_TXINIT] = "TX init",
	[CTC_STATE_RX] = "RX",
	[CTC_STATE_TX] = "TX",
	[CTC_STATE_RXIDLE] = "RX idle",
	[CTC_STATE_TXIDLE] = "TX idle",
	[CTC_STATE_RXERR] = "RX error",
	[CTC_STATE_TXERR] = "TX error",
	[CTC_STATE_TERM] = "Terminating",
	[CTC_STATE_DTERM] = "Restarting",
	[CTC_STATE_NOTOP] = "Not operational",
	/*
	 * additional MPC states
	 */
	[CH_XID0_PENDING] = "Pending XID0 Start",
	[CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
	[CH_XID7_PENDING] = "Pending XID7 P1 Start",
	[CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
	[CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
	[CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
	[CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
};

static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcm actions for channel statemachine -----
 *
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg);
static void chx_rx(fsm_instance *fi, int event, void *arg);
static void chx_rxidle(fsm_instance *fi, int event, void *arg);
static void chx_firstio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
 *
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
/* shared :
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);

/**
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * ch	:	The channel the error belongs to.
 * rc	:	The error code (!= 0) to inspect.
 */
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s: %04x\n",
			CTCM_FUNTAIL, ch->id, msg, rc);
	switch (rc) {
	case -EBUSY:
		pr_info("%s: The communication peer is busy\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		pr_err("%s: The specified target device is not valid\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		pr_err("An I/O operation resulted in error %04x\n",
			rc);
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}

void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);

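	/*
	 * Each queued skb still carries the extra reference taken when it
	 * was put on the queue by the transmit path, so drop that
	 * reference explicitly before freeing.
	 */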
	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/**
 * NOP action for statemachines
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions for channel - statemachines.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

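	/* track the longest observed TX completion time, in microseconds */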
	duration =
		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
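		/*
		 * The first skb of a block also accounts for the 2-byte
		 * block-length header that precedes the payload on the wire.
		 */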
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
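		/* the block starts with a 2-byte length field that covers
		 * the payload plus the length field itself */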
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = current_kernel_time(); /* xtime */
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}

/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}

/**
 * Got normal data, check for sanity, queue it up, allocate new buffer,
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	if (len < 8) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d < 8\n",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d > %d\n",
			CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got block length %d != rx length %d\n",
			CTCM_FUNTAIL, dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (block_len > 2) {
		*((__u16 *)skb->data) = block_len - 2;
		ctcm_unpack_skb(ch, skb);
	}
again:
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				(unsigned long)ch, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}

/**
 * Initialize connection by sending a __u16 holding the initial
 * block length (CTCM_INITIAL_BLOCKLEN).
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	int rc;
	struct channel *ch = arg;
	int fsmstate = fsm_getstate(fi);

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s(%s) : %02x",
		CTCM_FUNTAIL, ch->id, fsmstate);

	ch->sense_rc = 0;	/* reset unit check report control */
	if (fsmstate == CTC_STATE_TXIDLE)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): remote side issued READ?, init.\n",
			CTCM_FUNTAIL, ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->ml_priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}
	/*
	 * Don't set up a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				(unsigned long)ch, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode, since we don't set up a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 buflen;
	int rc;

	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *)ch->trans_skb->data);
	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
			__func__, dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
				__func__, dev->name,
				buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}

/**
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		timeout = 1500;
		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
				__func__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
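	/* start the mode-setup channel program held in ch->ccw[6]/[7] */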

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic in
	 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
				(unsigned long)ch, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/**
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags;
	int rc;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
		CTCM_FUNTAIL, ch->id,
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");

	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctcm_checkalloc_buffer(ch)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): %s trans_skb alloc delayed "
			"until first transfer",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
				"RX" : "TX");
	}
	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
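	/* ch->ccw[3..5] now mirror ccw[0..2]; retransmissions restart I/O
	 * from this second program (see ctcm_chx_txretry) */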
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
	}
}

/**
 * Shutdown a channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags = 0;
	int rc;
	int oldstate;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic in
	 * static view. => ignore sparse warnings here. */
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);

	if (event == CTC_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	/* see remark above about conditional locking */

	if (rc != 0 && rc != -EBUSY) {
		fsm_deltimer(&ch->timer);
		if (event != CTC_EVENT_STOP) {
			fsm_newstate(fi, oldstate);
			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
		}
	}
}

/**
 * Cleanup helper for chx_fail and chx_stopped:
 * clean up the channel's queues and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * state	The next state (depending on caller).
 * ch		The channel to operate on.
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
			"%s(%s): %s[%d]\n",
			CTCM_FUNTAIL, dev->name, ch->id, state);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

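	/* reset the MPC transport-header segment/sequence numbering */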
	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * A channel has successfully been halted.
 * Clean up its queue and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}

/**
 * A stop command from the device statemachine arrived and we are in
 * the not-operational mode. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}

/**
 * A machine check for no path, not-operational status or a gone device
 * has happened.
 * Clean up the queue and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}

/**
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that remote side isn't set up yet. In this case
	 * simply retry after a few seconds (CTCM_TIME_5_SEC)...
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) &&
		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s(%s) : %s error during %s channel setup state=%s\n",
		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s[%d] of %s\n",
		CTCM_FUNTAIL, ch->id, event, dev->name);

	fsm_deltimer(&ch->timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is a known problem for
	 * sparse because it is nondeterministic in static view.
	 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}

/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header).
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s(%s): RX %s busy, init. fail",
		CTCM_FUNTAIL, dev->name, ch->id);
	fsm_newstate(fi, CTC_STATE_RXERR);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}

/**
 * Handle RX Unit check remote reset (remote disconnected).
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s: remote disconnect - re-init ...",
		CTCM_FUNTAIL, dev->name);
	fsm_deltimer(&ch->timer);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[CTCM_WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, (unsigned long)ch);
	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
}

/**
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/**
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;

	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: retries exceeded",
				CTCM_FUNTAIL, ch->id);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready.
		   use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
		"%s : %s: retry %d",
		CTCM_FUNTAIL, ch->id, ch->retry);
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: IDAL alloc failed",
				CTCM_FUNTAIL, ch->id);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because it is nondeterministic in static view.
		 * Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
					(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}

/**
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int rd = CHANNEL_DIRECTION(ch->flags);

	fsm_deltimer(&ch->timer);
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s: %s: %s unrecoverable channel error",
		CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");

	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}
	if (rd == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * The ctcm statemachine for a channel.
 */
const fsm_node ch_fsm[] = {
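	/* each entry: { current state, triggering event, action routine } */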
	{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
	{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },

	{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },

	{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
	{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },

	{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
	{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};

int ch_fsm_len = ARRAY_SIZE(ch_fsm);

/*
 * MPC actions for the mpc channel statemachine:
 * handling of the MPC protocol requires an extra
 * statemachine and actions, which are prefixed ctcmpc_ .
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
 * which are expanded by some elements.
 */

/*
 * Actions for mpc channel statemachine.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb;
	int first = 1;
	int i;
	__u32 data_space;
	unsigned long duration;
	struct sk_buff *peekskb;
	int rc;
	struct th_header *header;
	struct pdu *p_header;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
			__func__, dev->name, smp_processor_id());

	duration =
		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch)) {
		spin_unlock(&ch->collect_lock);
		goto done;
	}
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
	i = 0;
	p_header = NULL;
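	/* space left in the group's transfer buffer after the TH header */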
	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;

	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
		" data_space:%04x\n",
		__func__, data_space);

	while ((skb = skb_dequeue(&ch->collect_queue))) {
		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
		p_header = (struct pdu *)
			(skb_tail_pointer(ch->trans_skb) - skb->len);
		p_header->pdu_flag = 0x00;
		if (skb->protocol == ntohs(ETH_P_SNAP))
			p_header->pdu_flag |= 0x60;
		else
			p_header->pdu_flag |= 0x20;

		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
			__func__, ch->trans_skb->len);
		CTCM_PR_DBGDATA("%s: pdu header and data for up"
			" to 32 bytes sent to vtam\n", __func__);
		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));

		ch->collect_len -= skb->len;
		data_space -= skb->len;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		if (!peekskb || (peekskb->len > data_space))
			break;
		i++;
	}
	/* p_header points to the last one we handled */
	if (p_header)
		p_header->pdu_flag |= PDU_LAST;	/*Say it's the last one*/
	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
	if (!header) {
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}
	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
		__func__, ch->th_seq_num);

	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
		TH_HEADER_LENGTH);	/* put the TH on the packet */

	kfree(header);

	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
		__func__, ch->trans_skb->len);
	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
		"data to vtam from collect_q\n", __func__);
	CTCM_D3_DUMP((char *)ch->trans_skb->data,
		min_t(int, ch->trans_skb->len, 50));

	spin_unlock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[1]);

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(unsigned long)ch->ccw[1].cda,
			ch->trans_skb->data);
	ch->ccw[1].count = ch->max_bufsize;

	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
			"%s: %s: IDAL alloc failed",
			CTCM_FUNTAIL, ch->id);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		return;
	}

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(unsigned long)ch->ccw[1].cda,
			ch->trans_skb->data);

	ch->ccw[1].count = ch->trans_skb->len;
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
				(unsigned long)ch, 0xff, 0);
	ch->prof.doios_multi++;
	if (rc != 0) {
		priv->stats.tx_dropped += i;
		priv->stats.tx_errors += i;
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "chained TX");
	}
done:
	ctcm_clear_busy(dev);
	return;
}

/**
 * Got normal data, check for sanity, queue it up, allocate new buffer,
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb = ch->trans_skb;
	struct sk_buff *new_skb;
	unsigned long saveflags = 0;	/* avoids compiler warning */
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;

	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
			CTCM_FUNTAIL, dev->name, smp_processor_id(),
			ch->id, ch->max_bufsize, len);
	fsm_deltimer(&ch->timer);

	if (skb == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): TRANS_SKB = NULL",
			CTCM_FUNTAIL, dev->name);
		goto again;
	}

	if (len < TH_HEADER_LENGTH) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): packet length %d too short",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
	} else {
		/* must have valid th header or game over */
		__u32 block_len = len;
		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
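		/* while the group is still negotiating, only the TH + XID2
		 * portion of the buffer is passed on (see default case below) */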
		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);

		if (new_skb == NULL) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): skb allocation failed",
				CTCM_FUNTAIL, dev->name);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto again;
		}
		switch (fsm_getstate(grp->fsm)) {
		case MPCG_STATE_RESET:
		case MPCG_STATE_INOP:
			dev_kfree_skb_any(new_skb);
			break;
		case MPCG_STATE_FLOWC:
		case MPCG_STATE_READY:
			memcpy(skb_put(new_skb, block_len),
				skb->data, block_len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_schedule(&ch->ch_tasklet);
			break;
		default:
			memcpy(skb_put(new_skb, len), skb->data, len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_hi_schedule(&ch->ch_tasklet);
			break;
		}
	}

again:
	switch (fsm_getstate(grp->fsm)) {
	int rc, dolock;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			break;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		dolock = !in_irq();
		if (dolock)
			spin_lock_irqsave(
				get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
		if (dolock) /* see remark about conditional locking */
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0)
			ctcm_ccw_check_rc(ch, rc, "normal RX");
	default:
		break;
	}

	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
			__func__, dev->name, ch, ch->id);

}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *gptr = priv->mpcg;

	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
			fsm_getstate(gptr->fsm), ch->protocol);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");

	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		goto done;

	switch (fsm_getstate(fi)) {
	case CTC_STATE_STARTRETRY:
	case CTC_STATE_SETUPWAIT:
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			ctcmpc_chx_rxidle(fi, event, arg);
		} else {
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		goto done;
	default:
		break;
	}

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		? CTC_STATE_RXINIT : CTC_STATE_TXINIT);

done:
	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);
	return;
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	fsm_deltimer(&ch->timer);
	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
			__func__, ch->id, dev->name, smp_processor_id(),
			fsm_getstate(fi), fsm_getstate(grp->fsm));

	fsm_newstate(fi, CTC_STATE_RXIDLE);
	/* XID processing complete */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			goto done;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
		if (event == CTC_EVENT_START)
			/* see remark about conditional locking */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_START)
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
			goto done;
		}
		break;
	default:
		break;
	}

	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
		__func__, dev->name, ch->id, ch, smp_processor_id(),
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* ok..start yside xid exchanges */
		if (!ch->in_mpcgroup)
			break;
		if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
			fsm_deltimer(&grp->timer);
			fsm_addtimer(&grp->timer,
				MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);

		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* attn rcvd before xid0 processed via bh */
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
		/* attn rcvd before xid0 processed on ch
		   but mid-xid0 processing for group */
		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
		switch (fsm_getstate(ch->fsm)) {
		case CH_XID7_PENDING:
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
			break;
		case CH_XID7_PENDING2:
			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
			break;
		}
		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
		break;
	}

	return;
}

/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
		__func__, dev->name, ch->id,
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. start yside xid exchanges */
		/* only receive one attn-busy at a time so must not	*/
		/* change state each time				*/
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn	 */
		/* so must report failure instead of reverting	 */
		/* back to ready-for-xid passive state		 */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid	 */
		/* collisions so yside must have been triggered	 */
		/* by an ATTN that was not intended to start XID */
		/* processing. Revert back to ready-for-xid and	 */
		/* wait for ATTN interrupt to signal xid start	 */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING) ;
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for second
		   channel. Send yside xid for second channel.
		*/
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/* multiple attn-busy indicates too out-of-sync	     */
		/* and they are certainly not being received as part */
		/* of valid mpc group negotiations..		     */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): channel %s not added to group",
			CTCM_FUNTAIL, dev->name, ch->id);

done:
	return;
}
1739
1740/*
1741 * ctcmpc channel FSM action
1742 * called from several points in ctcmpc_ch_fsm
1743 * ctcmpc only
1744 */
1745static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1746{
1747 struct channel *ch = arg;
1748 struct net_device *dev = ch->netdev;
1749 struct ctcm_priv *priv = dev->ml_priv;
1750 struct mpc_group *grp = priv->mpcg;
1751
1752 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1753 return;
1754}
1755
1756/*
1757 * ctcmpc channel FSM action
1758 * called from several points in ctcmpc_ch_fsm
1759 * ctcmpc only
1760 */
1761static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1762{
1763 struct channel *ach = arg;
1764 struct net_device *dev = ach->netdev;
1765 struct ctcm_priv *priv = dev->ml_priv;
1766 struct mpc_group *grp = priv->mpcg;
1767 struct channel *wch = priv->channel[CTCM_WRITE];
1768 struct channel *rch = priv->channel[CTCM_READ];
1769 struct sk_buff *skb;
1770 struct th_sweep *header;
1771 int rc = 0;
1772 unsigned long saveflags = 0;
1773
1774 CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1775 __func__, smp_processor_id(), ach, ach->id);
1776
1777 if (grp->in_sweep == 0)
1778 goto done;
1779
1780 CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
1781 __func__, wch->th_seq_num);
1782 CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
1783 __func__, rch->th_seq_num);
1784
1785 if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1786 /* give the previous IO time to complete */
1787 fsm_addtimer(&wch->sweep_timer,
1788 200, CTC_EVENT_RSWEEP_TIMER, wch);
1789 goto done;
1790 }
1791
1792 skb = skb_dequeue(&wch->sweep_queue);
1793 if (!skb)
1794 goto done;
1795
1796 if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1797 grp->in_sweep = 0;
1798 ctcm_clear_busy_do(dev);
1799 dev_kfree_skb_any(skb);
1800 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1801 goto done;
1802 } else {
1803 atomic_inc(&skb->users);
1804 skb_queue_tail(&wch->io_queue, skb);
1805 }
1806
1807 /* send out the sweep */
1808 wch->ccw[4].count = skb->len;
1809
1810 header = (struct th_sweep *)skb->data;
1811 switch (header->th.th_ch_flag) {
1812 case TH_SWEEP_REQ:
1813 grp->sweep_req_pend_num--;
1814 break;
1815 case TH_SWEEP_RESP:
1816 grp->sweep_rsp_pend_num--;
1817 break;
1818 }
1819
1820 header->sw.th_last_seq = wch->th_seq_num;
1821
1822 CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1823 CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
1824 CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
1825
1826 fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1827 fsm_newstate(wch->fsm, CTC_STATE_TX);
1828
1829 spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1830 wch->prof.send_stamp = current_kernel_time(); /* xtime */
1831 rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1832 (unsigned long) wch, 0xff, 0);
1833 spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1834
1835 if ((grp->sweep_req_pend_num == 0) &&
1836 (grp->sweep_rsp_pend_num == 0)) {
1837 grp->in_sweep = 0;
1838 rch->th_seq_num = 0x00;
1839 wch->th_seq_num = 0x00;
1840 ctcm_clear_busy_do(dev);
1841 }
1842
1843 CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
1844 __func__, wch->th_seq_num, rch->th_seq_num);
1845
1846 if (rc != 0)
1847 ctcm_ccw_check_rc(wch, rc, "send sweep");
1848
1849done:
1850 return;
1851}
1852
1853
1854/*
1855 * The ctcmpc statemachine for a channel.
1856 */
1857
1858const fsm_node ctcmpc_ch_fsm[] = {
1859 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1860 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1861 { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1862 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1863 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1864
1865 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1866 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1867 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1868 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1869 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1870 { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
1871 { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
1872 { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1873
1874 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1875 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1876 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1877 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1878 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1879 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1880
1881 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1882 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1883 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1884 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1885 { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1886
1887 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1888 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1889 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
1890 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1891 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1892 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1893 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1894 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1895
1896 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1897 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1898 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
1899 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1900 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1901 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1902 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1903 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1904 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
1905 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1906
1907 { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
1908 { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1909 { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1910 { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
1911 { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1912 { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1913 { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1914 { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1915 { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1916 { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1917
1918 { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1919 { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1920 { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
1921 { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
1922 { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1923 { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1924 { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1925 { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1926 { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
1927 { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1928 { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1929
1930 { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1931 { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1932 { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1933 { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
1934 { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1935 { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1936 { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1937 { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1938 { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1939 { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1940 { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1941 { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1942 { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1943
1944 { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1945 { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1946 { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
1947 { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
1948 { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1949 { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1950 { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1951 { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1952 { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1953 { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1954 { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1955 { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1956
1957 { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1958 { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1959 { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
1960 { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
1961 { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1962 { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1963 { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1964 { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1965 { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1966 { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1967 { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1968 { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1969
1970 { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1971 { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1972 { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
1973 { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
1974 { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1975 { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1976 { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1977 { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1978 { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1979 { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1980 { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1981 { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1982
1983 { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1984 { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1985 { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
1986 { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
1987 { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1988 { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1989 { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1990 { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1991 { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1992 { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1993 { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1994 { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1995
1996 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1997 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
1998 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1999 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
2000 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2001 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2002 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2003 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2004
2005 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
2006 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
2007 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
2008 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
2009 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
2010 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
2011 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2012 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2013 { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2014
2015 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
2016 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
2017 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
2018 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2019 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2020 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2021 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2022 { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2023
2024 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
2025 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
2026 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
2027 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2028 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2029 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2030 { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2031 { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2032
2033 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
2034 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
2035 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
2036 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2037 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2038 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2039 { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2040
2041 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
2042 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
2043 { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
2044 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2045 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2046 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
2047 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2048 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2049 { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2050 { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2051
2052 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2053 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2054 { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2055 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2056 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2057};
2058
2059int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
2060
2061/*
2062 * Actions for interface - statemachine.
2063 */
2064
2065/**
2066 * Startup channels by sending CTC_EVENT_START to each channel.
2067 *
2068 * fi An instance of an interface statemachine.
2069 * event The event, just happened.
2070 * arg Generic pointer, casted from struct net_device * upon call.
2071 */
2072static void dev_action_start(fsm_instance *fi, int event, void *arg)
2073{
2074 struct net_device *dev = arg;
2075 struct ctcm_priv *priv = dev->ml_priv;
2076 int direction;
2077
2078 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2079
2080 fsm_deltimer(&priv->restart_timer);
2081 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2082 if (IS_MPC(priv))
2083 priv->mpcg->channels_terminating = 0;
2084 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2085 struct channel *ch = priv->channel[direction];
2086 fsm_event(ch->fsm, CTC_EVENT_START, ch);
2087 }
2088}
2089
2090/**
2091 * Shutdown channels by sending CTC_EVENT_STOP to each channel.
2092 *
2093 * fi An instance of an interface statemachine.
2094 * event The event, just happened.
2095 * arg Generic pointer, casted from struct net_device * upon call.
2096 */
2097static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2098{
2099 int direction;
2100 struct net_device *dev = arg;
2101 struct ctcm_priv *priv = dev->ml_priv;
2102
2103 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2104
2105 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2106 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2107 struct channel *ch = priv->channel[direction];
2108 fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2109 ch->th_seq_num = 0x00;
2110 CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
2111 __func__, ch->th_seq_num);
2112 }
2113 if (IS_MPC(priv))
2114 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2115}
2116
2117static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2118{
2119 int restart_timer;
2120 struct net_device *dev = arg;
2121 struct ctcm_priv *priv = dev->ml_priv;
2122
2123 CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2124
2125 if (IS_MPC(priv)) {
2126 restart_timer = CTCM_TIME_1_SEC;
2127 } else {
2128 restart_timer = CTCM_TIME_5_SEC;
2129 }
2130 dev_info(&dev->dev, "Restarting device\n");
2131
2132 dev_action_stop(fi, event, arg);
2133 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2134 if (IS_MPC(priv))
2135 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2136
2137 /* going back into start sequence too quickly can */
2138 /* result in the other side becoming unreachable due */
2139 /* to sense reported when IO is aborted */
2140 fsm_addtimer(&priv->restart_timer, restart_timer,
2141 DEV_EVENT_START, dev);
2142}
2143
2144/**
2145 * Called from channel statemachine
2146 * when a channel is up and running.
2147 *
2148 * fi An instance of an interface statemachine.
2149 * event The event, just happened.
2150 * arg Generic pointer, casted from struct net_device * upon call.
2151 */
2152static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2153{
2154 struct net_device *dev = arg;
2155 struct ctcm_priv *priv = dev->ml_priv;
2156 int dev_stat = fsm_getstate(fi);
2157
2158 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
2159 "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
2160 dev->name, dev->ml_priv, dev_stat, event);
2161
2162 switch (fsm_getstate(fi)) {
2163 case DEV_STATE_STARTWAIT_RXTX:
2164 if (event == DEV_EVENT_RXUP)
2165 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2166 else
2167 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2168 break;
2169 case DEV_STATE_STARTWAIT_RX:
2170 if (event == DEV_EVENT_RXUP) {
2171 fsm_newstate(fi, DEV_STATE_RUNNING);
2172 dev_info(&dev->dev,
2173 "Connected with remote side\n");
2174 ctcm_clear_busy(dev);
2175 }
2176 break;
2177 case DEV_STATE_STARTWAIT_TX:
2178 if (event == DEV_EVENT_TXUP) {
2179 fsm_newstate(fi, DEV_STATE_RUNNING);
2180 dev_info(&dev->dev,
2181 "Connected with remote side\n");
2182 ctcm_clear_busy(dev);
2183 }
2184 break;
2185 case DEV_STATE_STOPWAIT_TX:
2186 if (event == DEV_EVENT_RXUP)
2187 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2188 break;
2189 case DEV_STATE_STOPWAIT_RX:
2190 if (event == DEV_EVENT_TXUP)
2191 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2192 break;
2193 }
2194
2195 if (IS_MPC(priv)) {
2196 if (event == DEV_EVENT_RXUP)
2197 mpc_channel_action(priv->channel[CTCM_READ],
2198 CTCM_READ, MPC_CHANNEL_ADD);
2199 else
2200 mpc_channel_action(priv->channel[CTCM_WRITE],
2201 CTCM_WRITE, MPC_CHANNEL_ADD);
2202 }
2203}
2204
2205/**
2206 * Called from device statemachine
2207 * when a channel has been shutdown.
2208 *
2209 * fi An instance of an interface statemachine.
2210 * event The event, just happened.
2211 * arg Generic pointer, casted from struct net_device * upon call.
2212 */
2213static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2214{
2215
2216 struct net_device *dev = arg;
2217 struct ctcm_priv *priv = dev->ml_priv;
2218
2219 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2220
2221 switch (fsm_getstate(fi)) {
2222 case DEV_STATE_RUNNING:
2223 if (event == DEV_EVENT_TXDOWN)
2224 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2225 else
2226 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2227 break;
2228 case DEV_STATE_STARTWAIT_RX:
2229 if (event == DEV_EVENT_TXDOWN)
2230 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2231 break;
2232 case DEV_STATE_STARTWAIT_TX:
2233 if (event == DEV_EVENT_RXDOWN)
2234 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2235 break;
2236 case DEV_STATE_STOPWAIT_RXTX:
2237 if (event == DEV_EVENT_TXDOWN)
2238 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2239 else
2240 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2241 break;
2242 case DEV_STATE_STOPWAIT_RX:
2243 if (event == DEV_EVENT_RXDOWN)
2244 fsm_newstate(fi, DEV_STATE_STOPPED);
2245 break;
2246 case DEV_STATE_STOPWAIT_TX:
2247 if (event == DEV_EVENT_TXDOWN)
2248 fsm_newstate(fi, DEV_STATE_STOPPED);
2249 break;
2250 }
2251 if (IS_MPC(priv)) {
2252 if (event == DEV_EVENT_RXDOWN)
2253 mpc_channel_action(priv->channel[CTCM_READ],
2254 CTCM_READ, MPC_CHANNEL_REMOVE);
2255 else
2256 mpc_channel_action(priv->channel[CTCM_WRITE],
2257 CTCM_WRITE, MPC_CHANNEL_REMOVE);
2258 }
2259}
2260
2261const fsm_node dev_fsm[] = {
2262 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
2263 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2264 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2265 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2266 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2267 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2268 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2269 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2270 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2271 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2272 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2273 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2274 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2275 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2276 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2277 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2278 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2279 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2280 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2281 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2282 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2283 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2284 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2285 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2286 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2287 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2288 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2289 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2290 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2291 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2292 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2293 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2294 { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2295 { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2296 { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
2297 { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
2298 { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2299};
2300
2301int dev_fsm_len = ARRAY_SIZE(dev_fsm);
2302
2303/* --- This is the END my friend --- */
2304
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright IBM Corp. 2001, 2007
4 * Authors: Fritz Elfert (felfert@millenux.com)
5 * Peter Tiedemann (ptiedem@de.ibm.com)
6 * MPC additions :
7 * Belinda Thompson (belindat@us.ibm.com)
8 * Andy Richter (richtera@us.ibm.com)
9 */
10
11#undef DEBUG
12#undef DEBUGDATA
13#undef DEBUGCCW
14
15#define KMSG_COMPONENT "ctcm"
16#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
17
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/slab.h>
22#include <linux/errno.h>
23#include <linux/types.h>
24#include <linux/interrupt.h>
25#include <linux/timer.h>
26#include <linux/bitops.h>
27
28#include <linux/signal.h>
29#include <linux/string.h>
30
31#include <linux/ip.h>
32#include <linux/if_arp.h>
33#include <linux/tcp.h>
34#include <linux/skbuff.h>
35#include <linux/ctype.h>
36#include <net/dst.h>
37
38#include <linux/io.h>
39#include <asm/ccwdev.h>
40#include <asm/ccwgroup.h>
41#include <linux/uaccess.h>
42
43#include <asm/idals.h>
44
45#include "fsm.h"
46
47#include "ctcm_dbug.h"
48#include "ctcm_main.h"
49#include "ctcm_fsms.h"
50
51const char *dev_state_names[] = {
52 [DEV_STATE_STOPPED] = "Stopped",
53 [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
54 [DEV_STATE_STARTWAIT_RX] = "StartWait RX",
55 [DEV_STATE_STARTWAIT_TX] = "StartWait TX",
56 [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
57 [DEV_STATE_STOPWAIT_RX] = "StopWait RX",
58 [DEV_STATE_STOPWAIT_TX] = "StopWait TX",
59 [DEV_STATE_RUNNING] = "Running",
60};
61
62const char *dev_event_names[] = {
63 [DEV_EVENT_START] = "Start",
64 [DEV_EVENT_STOP] = "Stop",
65 [DEV_EVENT_RXUP] = "RX up",
66 [DEV_EVENT_TXUP] = "TX up",
67 [DEV_EVENT_RXDOWN] = "RX down",
68 [DEV_EVENT_TXDOWN] = "TX down",
69 [DEV_EVENT_RESTART] = "Restart",
70};
71
72const char *ctc_ch_event_names[] = {
73 [CTC_EVENT_IO_SUCCESS] = "ccw_device success",
74 [CTC_EVENT_IO_EBUSY] = "ccw_device busy",
75 [CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
76 [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
77 [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
78 [CTC_EVENT_ATTN] = "Status ATTN",
79 [CTC_EVENT_BUSY] = "Status BUSY",
80 [CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
81 [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
82 [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
83 [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
84 [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
85 [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
86 [CTC_EVENT_UC_ZERO] = "Unit check ZERO",
87 [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
88 [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
89 [CTC_EVENT_MC_FAIL] = "Machine check failure",
90 [CTC_EVENT_MC_GOOD] = "Machine check operational",
91 [CTC_EVENT_IRQ] = "IRQ normal",
92 [CTC_EVENT_FINSTAT] = "IRQ final",
93 [CTC_EVENT_TIMER] = "Timer",
94 [CTC_EVENT_START] = "Start",
95 [CTC_EVENT_STOP] = "Stop",
96 /*
97 * additional MPC events
98 */
99 [CTC_EVENT_SEND_XID] = "XID Exchange",
100 [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
101};
102
103const char *ctc_ch_state_names[] = {
104 [CTC_STATE_IDLE] = "Idle",
105 [CTC_STATE_STOPPED] = "Stopped",
106 [CTC_STATE_STARTWAIT] = "StartWait",
107 [CTC_STATE_STARTRETRY] = "StartRetry",
108 [CTC_STATE_SETUPWAIT] = "SetupWait",
109 [CTC_STATE_RXINIT] = "RX init",
110 [CTC_STATE_TXINIT] = "TX init",
111 [CTC_STATE_RX] = "RX",
112 [CTC_STATE_TX] = "TX",
113 [CTC_STATE_RXIDLE] = "RX idle",
114 [CTC_STATE_TXIDLE] = "TX idle",
115 [CTC_STATE_RXERR] = "RX error",
116 [CTC_STATE_TXERR] = "TX error",
117 [CTC_STATE_TERM] = "Terminating",
118 [CTC_STATE_DTERM] = "Restarting",
119 [CTC_STATE_NOTOP] = "Not operational",
120 /*
121 * additional MPC states
122 */
123 [CH_XID0_PENDING] = "Pending XID0 Start",
124 [CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
125 [CH_XID7_PENDING] = "Pending XID7 P1 Start",
126 [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
127 [CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
128 [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
129 [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
130};
131
132static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
133
134/*
135 * ----- static ctcm actions for channel statemachine -----
136 *
137*/
138static void chx_txdone(fsm_instance *fi, int event, void *arg);
139static void chx_rx(fsm_instance *fi, int event, void *arg);
140static void chx_rxidle(fsm_instance *fi, int event, void *arg);
141static void chx_firstio(fsm_instance *fi, int event, void *arg);
142static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
143static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
144static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
145static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
146static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
147static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
148static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
149static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
150static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
151static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
152static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
153static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
154static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
155static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
156
157/*
158 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
159 *
160*/
161static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
162static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
163static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
164/* shared :
165static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
166static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
167static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
168static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
169static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
170static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
171static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
172static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
173static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
174static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
175static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
176static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
177static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
178static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
179*/
180static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
181static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
182static void ctcmpc_chx_resend(fsm_instance *, int, void *);
183static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
184
185/**
186 * Check return code of a preceding ccw_device call, halt_IO etc...
187 *
188 * ch : The channel, the error belongs to.
189 * Returns the error code (!= 0) to inspect.
190 */
191void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
192{
193 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
194 "%s(%s): %s: %04x\n",
195 CTCM_FUNTAIL, ch->id, msg, rc);
196 switch (rc) {
197 case -EBUSY:
198 pr_info("%s: The communication peer is busy\n",
199 ch->id);
200 fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
201 break;
202 case -ENODEV:
203 pr_err("%s: The specified target device is not valid\n",
204 ch->id);
205 fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
206 break;
207 default:
208 pr_err("An I/O operation resulted in error %04x\n",
209 rc);
210 fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
211 }
212}
213
214void ctcm_purge_skb_queue(struct sk_buff_head *q)
215{
216 struct sk_buff *skb;
217
218 CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
219
220 while ((skb = skb_dequeue(q))) {
221 refcount_dec(&skb->users);
222 dev_kfree_skb_any(skb);
223 }
224}
225
226/**
227 * NOP action for statemachines
228 */
229static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
230{
231}
232
233/*
234 * Actions for channel - statemachines.
235 */
236
237/**
238 * Normal data has been send. Free the corresponding
239 * skb (it's in io_queue), reset dev->tbusy and
240 * revert to idle state.
241 *
242 * fi An instance of a channel statemachine.
243 * event The event, just happened.
244 * arg Generic pointer, casted from channel * upon call.
245 */
246static void chx_txdone(fsm_instance *fi, int event, void *arg)
247{
248 struct channel *ch = arg;
249 struct net_device *dev = ch->netdev;
250 struct ctcm_priv *priv = dev->ml_priv;
251 struct sk_buff *skb;
252 int first = 1;
253 int i;
254 unsigned long duration;
255 unsigned long done_stamp = jiffies;
256
257 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
258
259 duration = done_stamp - ch->prof.send_stamp;
260 if (duration > ch->prof.tx_time)
261 ch->prof.tx_time = duration;
262
263 if (ch->irb->scsw.cmd.count != 0)
264 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
265 "%s(%s): TX not complete, remaining %d bytes",
266 CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
267 fsm_deltimer(&ch->timer);
268 while ((skb = skb_dequeue(&ch->io_queue))) {
269 priv->stats.tx_packets++;
270 priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
271 if (first) {
272 priv->stats.tx_bytes += 2;
273 first = 0;
274 }
275 refcount_dec(&skb->users);
276 dev_kfree_skb_irq(skb);
277 }
278 spin_lock(&ch->collect_lock);
279 clear_normalized_cda(&ch->ccw[4]);
280 if (ch->collect_len > 0) {
281 int rc;
282
283 if (ctcm_checkalloc_buffer(ch)) {
284 spin_unlock(&ch->collect_lock);
285 return;
286 }
287 ch->trans_skb->data = ch->trans_skb_data;
288 skb_reset_tail_pointer(ch->trans_skb);
289 ch->trans_skb->len = 0;
290 if (ch->prof.maxmulti < (ch->collect_len + 2))
291 ch->prof.maxmulti = ch->collect_len + 2;
292 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
293 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
294 *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
295 i = 0;
296 while ((skb = skb_dequeue(&ch->collect_queue))) {
297 skb_copy_from_linear_data(skb,
298 skb_put(ch->trans_skb, skb->len), skb->len);
299 priv->stats.tx_packets++;
300 priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
301 refcount_dec(&skb->users);
302 dev_kfree_skb_irq(skb);
303 i++;
304 }
305 ch->collect_len = 0;
306 spin_unlock(&ch->collect_lock);
307 ch->ccw[1].count = ch->trans_skb->len;
308 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
309 ch->prof.send_stamp = jiffies;
310 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
311 (unsigned long)ch, 0xff, 0);
312 ch->prof.doios_multi++;
313 if (rc != 0) {
314 priv->stats.tx_dropped += i;
315 priv->stats.tx_errors += i;
316 fsm_deltimer(&ch->timer);
317 ctcm_ccw_check_rc(ch, rc, "chained TX");
318 }
319 } else {
320 spin_unlock(&ch->collect_lock);
321 fsm_newstate(fi, CTC_STATE_TXIDLE);
322 }
323 ctcm_clear_busy_do(dev);
324}
325
326/**
327 * Initial data is sent.
328 * Notify device statemachine that we are up and
329 * running.
330 *
331 * fi An instance of a channel statemachine.
332 * event The event, just happened.
333 * arg Generic pointer, casted from channel * upon call.
334 */
335void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
336{
337 struct channel *ch = arg;
338 struct net_device *dev = ch->netdev;
339 struct ctcm_priv *priv = dev->ml_priv;
340
341 CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
342
343 fsm_deltimer(&ch->timer);
344 fsm_newstate(fi, CTC_STATE_TXIDLE);
345 fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
346}
347
348/**
349 * Got normal data, check for sanity, queue it up, allocate new buffer
350 * trigger bottom half, and initiate next read.
351 *
352 * fi An instance of a channel statemachine.
353 * event The event, just happened.
354 * arg Generic pointer, casted from channel * upon call.
355 */
356static void chx_rx(fsm_instance *fi, int event, void *arg)
357{
358 struct channel *ch = arg;
359 struct net_device *dev = ch->netdev;
360 struct ctcm_priv *priv = dev->ml_priv;
361 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
362 struct sk_buff *skb = ch->trans_skb;
363 __u16 block_len = *((__u16 *)skb->data);
364 int check_len;
365 int rc;
366
367 fsm_deltimer(&ch->timer);
368 if (len < 8) {
369 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
370 "%s(%s): got packet with length %d < 8\n",
371 CTCM_FUNTAIL, dev->name, len);
372 priv->stats.rx_dropped++;
373 priv->stats.rx_length_errors++;
374 goto again;
375 }
376 if (len > ch->max_bufsize) {
377 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
378 "%s(%s): got packet with length %d > %d\n",
379 CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
380 priv->stats.rx_dropped++;
381 priv->stats.rx_length_errors++;
382 goto again;
383 }
384
385 /*
386 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
387 */
388 switch (ch->protocol) {
389 case CTCM_PROTO_S390:
390 case CTCM_PROTO_OS390:
391 check_len = block_len + 2;
392 break;
393 default:
394 check_len = block_len;
395 break;
396 }
397 if ((len < block_len) || (len > check_len)) {
398 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
399 "%s(%s): got block length %d != rx length %d\n",
400 CTCM_FUNTAIL, dev->name, block_len, len);
401 if (do_debug)
402 ctcmpc_dump_skb(skb, 0);
403
404 *((__u16 *)skb->data) = len;
405 priv->stats.rx_dropped++;
406 priv->stats.rx_length_errors++;
407 goto again;
408 }
409 if (block_len > 2) {
410 *((__u16 *)skb->data) = block_len - 2;
411 ctcm_unpack_skb(ch, skb);
412 }
413 again:
414 skb->data = ch->trans_skb_data;
415 skb_reset_tail_pointer(skb);
416 skb->len = 0;
417 if (ctcm_checkalloc_buffer(ch))
418 return;
419 ch->ccw[1].count = ch->max_bufsize;
420 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
421 (unsigned long)ch, 0xff, 0);
422 if (rc != 0)
423 ctcm_ccw_check_rc(ch, rc, "normal RX");
424}
425
426/**
427 * Initialize connection by sending a __u16 of value 0.
428 *
429 * fi An instance of a channel statemachine.
430 * event The event, just happened.
431 * arg Generic pointer, casted from channel * upon call.
432 */
433static void chx_firstio(fsm_instance *fi, int event, void *arg)
434{
435 int rc;
436 struct channel *ch = arg;
437 int fsmstate = fsm_getstate(fi);
438
439 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
440 "%s(%s) : %02x",
441 CTCM_FUNTAIL, ch->id, fsmstate);
442
443 ch->sense_rc = 0; /* reset unit check report control */
444 if (fsmstate == CTC_STATE_TXIDLE)
445 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
446 "%s(%s): remote side issued READ?, init.\n",
447 CTCM_FUNTAIL, ch->id);
448 fsm_deltimer(&ch->timer);
449 if (ctcm_checkalloc_buffer(ch))
450 return;
451 if ((fsmstate == CTC_STATE_SETUPWAIT) &&
452 (ch->protocol == CTCM_PROTO_OS390)) {
453 /* OS/390 resp. z/OS */
454 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
455 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
456 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
457 CTC_EVENT_TIMER, ch);
458 chx_rxidle(fi, event, arg);
459 } else {
460 struct net_device *dev = ch->netdev;
461 struct ctcm_priv *priv = dev->ml_priv;
462 fsm_newstate(fi, CTC_STATE_TXIDLE);
463 fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
464 }
465 return;
466 }
467 /*
468 * Don't setup a timer for receiving the initial RX frame
469 * if in compatibility mode, since VM TCP delays the initial
470 * frame until it has some data to send.
471 */
472 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
473 (ch->protocol != CTCM_PROTO_S390))
474 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
475
476 *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
477 ch->ccw[1].count = 2; /* Transfer only length */
478
479 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
480 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
481 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
482 (unsigned long)ch, 0xff, 0);
483 if (rc != 0) {
484 fsm_deltimer(&ch->timer);
485 fsm_newstate(fi, CTC_STATE_SETUPWAIT);
486 ctcm_ccw_check_rc(ch, rc, "init IO");
487 }
488 /*
489 * If in compatibility mode since we don't setup a timer, we
490 * also signal RX channel up immediately. This enables us
491 * to send packets early which in turn usually triggers some
492 * reply from VM TCP which brings up the RX channel to it's
493 * final state.
494 */
495 if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
496 (ch->protocol == CTCM_PROTO_S390)) {
497 struct net_device *dev = ch->netdev;
498 struct ctcm_priv *priv = dev->ml_priv;
499 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
500 }
501}
502
503/**
504 * Got initial data, check it. If OK,
505 * notify device statemachine that we are up and
506 * running.
507 *
508 * fi An instance of a channel statemachine.
509 * event The event, just happened.
510 * arg Generic pointer, casted from channel * upon call.
511 */
512static void chx_rxidle(fsm_instance *fi, int event, void *arg)
513{
514 struct channel *ch = arg;
515 struct net_device *dev = ch->netdev;
516 struct ctcm_priv *priv = dev->ml_priv;
517 __u16 buflen;
518 int rc;
519
520 fsm_deltimer(&ch->timer);
521 buflen = *((__u16 *)ch->trans_skb->data);
522 CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
523 __func__, dev->name, buflen);
524
525 if (buflen >= CTCM_INITIAL_BLOCKLEN) {
526 if (ctcm_checkalloc_buffer(ch))
527 return;
528 ch->ccw[1].count = ch->max_bufsize;
529 fsm_newstate(fi, CTC_STATE_RXIDLE);
530 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
531 (unsigned long)ch, 0xff, 0);
532 if (rc != 0) {
533 fsm_newstate(fi, CTC_STATE_RXINIT);
534 ctcm_ccw_check_rc(ch, rc, "initial RX");
535 } else
536 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
537 } else {
538 CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
539 __func__, dev->name,
540 buflen, CTCM_INITIAL_BLOCKLEN);
541 chx_firstio(fi, event, arg);
542 }
543}
544
545/**
546 * Set channel into extended mode.
547 *
548 * fi An instance of a channel statemachine.
549 * event The event, just happened.
550 * arg Generic pointer, casted from channel * upon call.
551 */
552static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
553{
554 struct channel *ch = arg;
555 int rc;
556 unsigned long saveflags = 0;
557 int timeout = CTCM_TIME_5_SEC;
558
559 fsm_deltimer(&ch->timer);
560 if (IS_MPC(ch)) {
561 timeout = 1500;
562 CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
563 __func__, smp_processor_id(), ch, ch->id);
564 }
565 fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
566 fsm_newstate(fi, CTC_STATE_SETUPWAIT);
567 CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
568
569 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
570 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
571 /* Such conditional locking is undeterministic in
572 * static view. => ignore sparse warnings here. */
573
574 rc = ccw_device_start(ch->cdev, &ch->ccw[6],
575 (unsigned long)ch, 0xff, 0);
576 if (event == CTC_EVENT_TIMER) /* see above comments */
577 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
578 if (rc != 0) {
579 fsm_deltimer(&ch->timer);
580 fsm_newstate(fi, CTC_STATE_STARTWAIT);
581 ctcm_ccw_check_rc(ch, rc, "set Mode");
582 } else
583 ch->retry = 0;
584}
585
586/**
587 * Setup channel.
588 *
589 * fi An instance of a channel statemachine.
590 * event The event, just happened.
591 * arg Generic pointer, casted from channel * upon call.
592 */
593static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
594{
595 struct channel *ch = arg;
596 unsigned long saveflags;
597 int rc;
598
599 CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
600 CTCM_FUNTAIL, ch->id,
601 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
602
603 if (ch->trans_skb != NULL) {
604 clear_normalized_cda(&ch->ccw[1]);
605 dev_kfree_skb(ch->trans_skb);
606 ch->trans_skb = NULL;
607 }
608 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
609 ch->ccw[1].cmd_code = CCW_CMD_READ;
610 ch->ccw[1].flags = CCW_FLAG_SLI;
611 ch->ccw[1].count = 0;
612 } else {
613 ch->ccw[1].cmd_code = CCW_CMD_WRITE;
614 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
615 ch->ccw[1].count = 0;
616 }
617 if (ctcm_checkalloc_buffer(ch)) {
618 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
619 "%s(%s): %s trans_skb alloc delayed "
620 "until first transfer",
621 CTCM_FUNTAIL, ch->id,
622 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
623 "RX" : "TX");
624 }
625 ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
626 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
627 ch->ccw[0].count = 0;
628 ch->ccw[0].cda = 0;
629 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */
630 ch->ccw[2].flags = CCW_FLAG_SLI;
631 ch->ccw[2].count = 0;
632 ch->ccw[2].cda = 0;
633 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
634 ch->ccw[4].cda = 0;
635 ch->ccw[4].flags &= ~CCW_FLAG_IDA;
636
637 fsm_newstate(fi, CTC_STATE_STARTWAIT);
638 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
639 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
640 rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
641 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
642 if (rc != 0) {
643 if (rc != -EBUSY)
644 fsm_deltimer(&ch->timer);
645 ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
646 }
647}
648
649/**
650 * Shutdown a channel.
651 *
652 * fi An instance of a channel statemachine.
653 * event The event, just happened.
654 * arg Generic pointer, casted from channel * upon call.
655 */
656static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
657{
658 struct channel *ch = arg;
659 unsigned long saveflags = 0;
660 int rc;
661 int oldstate;
662
663 fsm_deltimer(&ch->timer);
664 if (IS_MPC(ch))
665 fsm_deltimer(&ch->sweep_timer);
666
667 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
668
669 if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */
670 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
671 /* Such conditional locking is undeterministic in
672 * static view. => ignore sparse warnings here. */
673 oldstate = fsm_getstate(fi);
674 fsm_newstate(fi, CTC_STATE_TERM);
675 rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
676
677 if (event == CTC_EVENT_STOP)
678 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
679 /* see remark above about conditional locking */
680
681 if (rc != 0 && rc != -EBUSY) {
682 fsm_deltimer(&ch->timer);
683 if (event != CTC_EVENT_STOP) {
684 fsm_newstate(fi, oldstate);
685 ctcm_ccw_check_rc(ch, rc, (char *)__func__);
686 }
687 }
688}
689
690/**
691 * Cleanup helper for chx_fail and chx_stopped
692 * cleanup channels queue and notify interface statemachine.
693 *
694 * fi An instance of a channel statemachine.
695 * state The next state (depending on caller).
696 * ch The channel to operate on.
697 */
698static void ctcm_chx_cleanup(fsm_instance *fi, int state,
699 struct channel *ch)
700{
701 struct net_device *dev = ch->netdev;
702 struct ctcm_priv *priv = dev->ml_priv;
703
704 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
705 "%s(%s): %s[%d]\n",
706 CTCM_FUNTAIL, dev->name, ch->id, state);
707
708 fsm_deltimer(&ch->timer);
709 if (IS_MPC(ch))
710 fsm_deltimer(&ch->sweep_timer);
711
712 fsm_newstate(fi, state);
713 if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
714 clear_normalized_cda(&ch->ccw[1]);
715 dev_kfree_skb_any(ch->trans_skb);
716 ch->trans_skb = NULL;
717 }
718
719 ch->th_seg = 0x00;
720 ch->th_seq_num = 0x00;
721 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
722 skb_queue_purge(&ch->io_queue);
723 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
724 } else {
725 ctcm_purge_skb_queue(&ch->io_queue);
726 if (IS_MPC(ch))
727 ctcm_purge_skb_queue(&ch->sweep_queue);
728 spin_lock(&ch->collect_lock);
729 ctcm_purge_skb_queue(&ch->collect_queue);
730 ch->collect_len = 0;
731 spin_unlock(&ch->collect_lock);
732 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
733 }
734}
735
736/**
737 * A channel has successfully been halted.
738 * Cleanup it's queue and notify interface statemachine.
739 *
740 * fi An instance of a channel statemachine.
741 * event The event, just happened.
742 * arg Generic pointer, casted from channel * upon call.
743 */
744static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
745{
746 ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
747}
748
749/**
750 * A stop command from device statemachine arrived and we are in
751 * not operational mode. Set state to stopped.
752 *
753 * fi An instance of a channel statemachine.
754 * event The event, just happened.
755 * arg Generic pointer, casted from channel * upon call.
756 */
757static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
758{
759 fsm_newstate(fi, CTC_STATE_STOPPED);
760}
761
762/**
763 * A machine check for no path, not operational status or gone device has
764 * happened.
765 * Cleanup queue and notify interface statemachine.
766 *
767 * fi An instance of a channel statemachine.
768 * event The event, just happened.
769 * arg Generic pointer, casted from channel * upon call.
770 */
771static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
772{
773 ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
774}
775
776/**
777 * Handle error during setup of channel.
778 *
779 * fi An instance of a channel statemachine.
780 * event The event, just happened.
781 * arg Generic pointer, casted from channel * upon call.
782 */
783static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
784{
785 struct channel *ch = arg;
786 struct net_device *dev = ch->netdev;
787 struct ctcm_priv *priv = dev->ml_priv;
788
789 /*
790 * Special case: Got UC_RCRESET on setmode.
791 * This means that remote side isn't setup. In this case
792 * simply retry after some 10 secs...
793 */
794 if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
795 ((event == CTC_EVENT_UC_RCRESET) ||
796 (event == CTC_EVENT_UC_RSRESET))) {
797 fsm_newstate(fi, CTC_STATE_STARTRETRY);
798 fsm_deltimer(&ch->timer);
799 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
800 if (!IS_MPC(ch) &&
801 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
802 int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
803 if (rc != 0)
804 ctcm_ccw_check_rc(ch, rc,
805 "HaltIO in chx_setuperr");
806 }
807 return;
808 }
809
810 CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
811 "%s(%s) : %s error during %s channel setup state=%s\n",
812 CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
813 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
814 fsm_getstate_str(fi));
815
816 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
817 fsm_newstate(fi, CTC_STATE_RXERR);
818 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
819 } else {
820 fsm_newstate(fi, CTC_STATE_TXERR);
821 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
822 }
823}
824
825/**
826 * Restart a channel after an error.
827 *
828 * fi An instance of a channel statemachine.
829 * event The event, just happened.
830 * arg Generic pointer, casted from channel * upon call.
831 */
832static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
833{
834 struct channel *ch = arg;
835 struct net_device *dev = ch->netdev;
836 unsigned long saveflags = 0;
837 int oldstate;
838 int rc;
839
840 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
841 "%s: %s[%d] of %s\n",
842 CTCM_FUNTAIL, ch->id, event, dev->name);
843
844 fsm_deltimer(&ch->timer);
845
846 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
847 oldstate = fsm_getstate(fi);
848 fsm_newstate(fi, CTC_STATE_STARTWAIT);
849 if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */
850 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
851 /* Such conditional locking is a known problem for
852 * sparse because its undeterministic in static view.
853 * Warnings should be ignored here. */
854 rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
855 if (event == CTC_EVENT_TIMER)
856 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
857 if (rc != 0) {
858 if (rc != -EBUSY) {
859 fsm_deltimer(&ch->timer);
860 fsm_newstate(fi, oldstate);
861 }
862 ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
863 }
864}
865
866/**
867 * Handle error during RX initial handshake (exchange of
868 * 0-length block header)
869 *
870 * fi An instance of a channel statemachine.
871 * event The event, just happened.
872 * arg Generic pointer, casted from channel * upon call.
873 */
874static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
875{
876 struct channel *ch = arg;
877 struct net_device *dev = ch->netdev;
878 struct ctcm_priv *priv = dev->ml_priv;
879
880 if (event == CTC_EVENT_TIMER) {
881 if (!IS_MPCDEV(dev))
882 /* TODO : check if MPC deletes timer somewhere */
883 fsm_deltimer(&ch->timer);
884 if (ch->retry++ < 3)
885 ctcm_chx_restart(fi, event, arg);
886 else {
887 fsm_newstate(fi, CTC_STATE_RXERR);
888 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
889 }
890 } else {
891 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
892 "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
893 ctc_ch_event_names[event], fsm_getstate_str(fi));
894
895 dev_warn(&dev->dev,
896 "Initialization failed with RX/TX init handshake "
897 "error %s\n", ctc_ch_event_names[event]);
898 }
899}
900
901/**
902 * Notify device statemachine if we gave up initialization
903 * of RX channel.
904 *
905 * fi An instance of a channel statemachine.
906 * event The event, just happened.
907 * arg Generic pointer, casted from channel * upon call.
908 */
909static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
910{
911 struct channel *ch = arg;
912 struct net_device *dev = ch->netdev;
913 struct ctcm_priv *priv = dev->ml_priv;
914
915 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
916 "%s(%s): RX %s busy, init. fail",
917 CTCM_FUNTAIL, dev->name, ch->id);
918 fsm_newstate(fi, CTC_STATE_RXERR);
919 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
920}
921
922/**
923 * Handle RX Unit check remote reset (remote disconnected)
924 *
925 * fi An instance of a channel statemachine.
926 * event The event, just happened.
927 * arg Generic pointer, casted from channel * upon call.
928 */
929static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
930{
931 struct channel *ch = arg;
932 struct channel *ch2;
933 struct net_device *dev = ch->netdev;
934 struct ctcm_priv *priv = dev->ml_priv;
935
936 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
937 "%s: %s: remote disconnect - re-init ...",
938 CTCM_FUNTAIL, dev->name);
939 fsm_deltimer(&ch->timer);
940 /*
941 * Notify device statemachine
942 */
943 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
944 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
945
946 fsm_newstate(fi, CTC_STATE_DTERM);
947 ch2 = priv->channel[CTCM_WRITE];
948 fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
949
950 ccw_device_halt(ch->cdev, (unsigned long)ch);
951 ccw_device_halt(ch2->cdev, (unsigned long)ch2);
952}
953
954/**
955 * Handle error during TX channel initialization.
956 *
957 * fi An instance of a channel statemachine.
958 * event The event, just happened.
959 * arg Generic pointer, casted from channel * upon call.
960 */
961static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
962{
963 struct channel *ch = arg;
964 struct net_device *dev = ch->netdev;
965 struct ctcm_priv *priv = dev->ml_priv;
966
967 if (event == CTC_EVENT_TIMER) {
968 fsm_deltimer(&ch->timer);
969 if (ch->retry++ < 3)
970 ctcm_chx_restart(fi, event, arg);
971 else {
972 fsm_newstate(fi, CTC_STATE_TXERR);
973 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
974 }
975 } else {
976 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
977 "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
978 ctc_ch_event_names[event], fsm_getstate_str(fi));
979
980 dev_warn(&dev->dev,
981 "Initialization failed with RX/TX init handshake "
982 "error %s\n", ctc_ch_event_names[event]);
983 }
984}
985
986/**
987 * Handle TX timeout by retrying operation.
988 *
989 * fi An instance of a channel statemachine.
990 * event The event that just happened.
991 * arg Generic pointer, cast from channel * upon call.
992 */
993static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
994{
995 struct channel *ch = arg;
996 struct net_device *dev = ch->netdev;
997 struct ctcm_priv *priv = dev->ml_priv;
998 struct sk_buff *skb;
999
1000 CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
1001 __func__, smp_processor_id(), ch, ch->id);
1002
1003 fsm_deltimer(&ch->timer);
1004 if (ch->retry++ > 3) {
1005 struct mpc_group *gptr = priv->mpcg;
1006 CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1007 "%s: %s: retries exceeded",
1008 CTCM_FUNTAIL, ch->id);
1009 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1010 /* call restart if not MPC or if MPC and mpcg fsm is ready.
1011 use gptr as mpc indicator */
1012 if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
1013 ctcm_chx_restart(fi, event, arg);
1014 goto done;
1015 }
1016
1017 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1018 "%s : %s: retry %d",
1019 CTCM_FUNTAIL, ch->id, ch->retry);
1020 skb = skb_peek(&ch->io_queue);
1021 if (skb) {
1022 int rc = 0;
1023 unsigned long saveflags = 0;
1024 clear_normalized_cda(&ch->ccw[4]);
1025 ch->ccw[4].count = skb->len;
1026 if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1027 CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1028 "%s: %s: IDAL alloc failed",
1029 CTCM_FUNTAIL, ch->id);
1030 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1031 ctcm_chx_restart(fi, event, arg);
1032 goto done;
1033 }
1034 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1035 if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
1036 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1037 /* Such conditional locking is a known problem for
1038 * sparse because it is nondeterministic in a static view.
1039 * Warnings should be ignored here. */
1040 if (do_debug_ccw)
1041 ctcmpc_dumpit((char *)&ch->ccw[3],
1042 sizeof(struct ccw1) * 3);
1043
1044 rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1045 (unsigned long)ch, 0xff, 0);
1046 if (event == CTC_EVENT_TIMER)
1047 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1048 saveflags);
1049 if (rc != 0) {
1050 fsm_deltimer(&ch->timer);
1051 ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1052 ctcm_purge_skb_queue(&ch->io_queue);
1053 }
1054 }
1055done:
1056 return;
1057}
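/*
 * The conditional locking in ctcm_chx_txretry() follows a convention used
 * throughout this file: when an action runs from the channel interrupt
 * handler, the common I/O layer already holds the ccw device lock, so only
 * the timer-driven path has to take it around ccw_device_start().  The
 * same idea appears further down as an explicit "dolock" flag, roughly:
 *
 *	dolock = !in_irq();
 *	if (dolock)
 *		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
 *	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
 *			      (unsigned long)ch, 0xff, 0);
 *	if (dolock)
 *		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
 */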
1058
1059/**
1060 * Handle fatal errors during an I/O command.
1061 *
1062 * fi An instance of a channel statemachine.
1063 * event The event that just happened.
1064 * arg Generic pointer, cast from channel * upon call.
1065 */
1066static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1067{
1068 struct channel *ch = arg;
1069 struct net_device *dev = ch->netdev;
1070 struct ctcm_priv *priv = dev->ml_priv;
1071 int rd = CHANNEL_DIRECTION(ch->flags);
1072
1073 fsm_deltimer(&ch->timer);
1074 CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1075 "%s: %s: %s unrecoverable channel error",
1076 CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
1077
1078 if (IS_MPC(ch)) {
1079 priv->stats.tx_dropped++;
1080 priv->stats.tx_errors++;
1081 }
1082 if (rd == CTCM_READ) {
1083 fsm_newstate(fi, CTC_STATE_RXERR);
1084 fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1085 } else {
1086 fsm_newstate(fi, CTC_STATE_TXERR);
1087 fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1088 }
1089}
1090
1091/*
1092 * The ctcm statemachine for a channel.
1093 */
1094const fsm_node ch_fsm[] = {
1095 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1096 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1097 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1098 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1099
1100 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1101 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1102 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1103 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1104 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1105
1106 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1107 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1108 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1109 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1110 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1111 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1112
1113 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1114 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1115 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
1116 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1117
1118 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1119 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1120 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
1121 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1122 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1123 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1124 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1125 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1126
1127 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1128 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1129 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
1130 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1131 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1132 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1133 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1134 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1135 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
1136 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1137
1138 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1139 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
1140 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
1141 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
1142 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1143 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1144 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },
1145
1146 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1147 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
1148 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
1149 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
1150 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
1151 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
1152 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1153 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1154
1155 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1156 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
1157 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
1158 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1159 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1160 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1161 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1162
1163 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
1164 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
1165 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
1166 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1167 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1168 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1169
1170 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
1171 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
1172 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1173 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
1174 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
1175 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1176
1177 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
1178 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
1179 { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
1180 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
1181 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
1182 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
1183 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1184 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1185
1186 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1187 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
1188 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1189 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1190};
1191
1192int ch_fsm_len = ARRAY_SIZE(ch_fsm);
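/*
 * The table above is a template, not a structure walked at event time:
 * during channel setup init_fsm() (see fsm.h) expands it into a
 * state x event jump table, and fsm_event() then dispatches every event
 * to the matching action.  A rough sketch of the instantiation as done in
 * the channel setup code; the exact state/event counts come from
 * ctcm_fsms.h, so treat CTC_NR_STATES/CTC_NR_EVENTS here as illustrative:
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 *			   CTC_NR_STATES, CTC_NR_EVENTS,
 *			   ch_fsm, ch_fsm_len, GFP_KERNEL);
 *	...
 *	fsm_event(ch->fsm, CTC_EVENT_START, ch);
 */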
1193
1194/*
1195 * MPC actions for the mpc channel statemachine.
1196 * Handling of the MPC protocol requires an extra
1197 * statemachine and extra actions, which are prefixed ctcmpc_.
1198 * The ctc_ch_states and ctc_ch_state_names,
1199 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
1200 * which are extended by some MPC-specific elements.
1201 */
1202
1203/*
1204 * Actions for mpc channel statemachine.
1205 */
1206
1207/**
1208 * Normal data has been sent. Free the corresponding
1209 * skb (it's in io_queue), clear the device busy flag and
1210 * revert to idle state.
1211 *
1212 * fi An instance of a channel statemachine.
1213 * event The event that just happened.
1214 * arg Generic pointer, cast from channel * upon call.
1215 */
1216static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1217{
1218 struct channel *ch = arg;
1219 struct net_device *dev = ch->netdev;
1220 struct ctcm_priv *priv = dev->ml_priv;
1221 struct mpc_group *grp = priv->mpcg;
1222 struct sk_buff *skb;
1223 int first = 1;
1224 int i;
1225 __u32 data_space;
1226 unsigned long duration;
1227 struct sk_buff *peekskb;
1228 int rc;
1229 struct th_header *header;
1230 struct pdu *p_header;
1231 unsigned long done_stamp = jiffies;
1232
1233 CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
1234 __func__, dev->name, smp_processor_id());
1235
1236 duration = done_stamp - ch->prof.send_stamp;
1237 if (duration > ch->prof.tx_time)
1238 ch->prof.tx_time = duration;
1239
1240 if (ch->irb->scsw.cmd.count != 0)
1241 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
1242 "%s(%s): TX not complete, remaining %d bytes",
1243 CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
1244 fsm_deltimer(&ch->timer);
1245 while ((skb = skb_dequeue(&ch->io_queue))) {
1246 priv->stats.tx_packets++;
1247 priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1248 if (first) {
1249 priv->stats.tx_bytes += 2;
1250 first = 0;
1251 }
1252 refcount_dec(&skb->users);
1253 dev_kfree_skb_irq(skb);
1254 }
1255 spin_lock(&ch->collect_lock);
1256 clear_normalized_cda(&ch->ccw[4]);
1257 if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1258 spin_unlock(&ch->collect_lock);
1259 fsm_newstate(fi, CTC_STATE_TXIDLE);
1260 goto done;
1261 }
1262
1263 if (ctcm_checkalloc_buffer(ch)) {
1264 spin_unlock(&ch->collect_lock);
1265 goto done;
1266 }
1267 ch->trans_skb->data = ch->trans_skb_data;
1268 skb_reset_tail_pointer(ch->trans_skb);
1269 ch->trans_skb->len = 0;
1270 if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1271 ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1272 if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1273 ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1274 i = 0;
1275 p_header = NULL;
1276 data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
1277
1278 CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
1279 " data_space:%04x\n",
1280 __func__, data_space);
1281
1282 while ((skb = skb_dequeue(&ch->collect_queue))) {
1283 skb_put_data(ch->trans_skb, skb->data, skb->len);
1284 p_header = (struct pdu *)
1285 (skb_tail_pointer(ch->trans_skb) - skb->len);
1286 p_header->pdu_flag = 0x00;
1287 if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
1288 p_header->pdu_flag |= 0x60;
1289 else
1290 p_header->pdu_flag |= 0x20;
1291
1292 CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1293 __func__, ch->trans_skb->len);
1294 CTCM_PR_DBGDATA("%s: pdu header and data for up"
1295 " to 32 bytes sent to vtam\n", __func__);
1296 CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
1297
1298 ch->collect_len -= skb->len;
1299 data_space -= skb->len;
1300 priv->stats.tx_packets++;
1301 priv->stats.tx_bytes += skb->len;
1302 refcount_dec(&skb->users);
1303 dev_kfree_skb_any(skb);
1304 peekskb = skb_peek(&ch->collect_queue);
1305 if (!peekskb || peekskb->len > data_space)
1306 break;
1307 i++;
1308 }
1309 /* p_header points to the last one we handled */
1310 if (p_header)
1311 p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/
1312 header = kzalloc(TH_HEADER_LENGTH, gfp_type());
1313 if (!header) {
1314 spin_unlock(&ch->collect_lock);
1315 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1316 goto done;
1317 }
1318 header->th_ch_flag = TH_HAS_PDU; /* Normal data */
1319 ch->th_seq_num++;
1320 header->th_seq_num = ch->th_seq_num;
1321
1322 CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
1323 __func__, ch->th_seq_num);
1324
1325 memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1326 TH_HEADER_LENGTH); /* put the TH on the packet */
1327
1328 kfree(header);
1329
1330 CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1331 __func__, ch->trans_skb->len);
1332 CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
1333 "data to vtam from collect_q\n", __func__);
1334 CTCM_D3_DUMP((char *)ch->trans_skb->data,
1335 min_t(int, ch->trans_skb->len, 50));
1336
1337 spin_unlock(&ch->collect_lock);
1338 clear_normalized_cda(&ch->ccw[1]);
1339
1340 CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1341 (void *)(unsigned long)ch->ccw[1].cda,
1342 ch->trans_skb->data);
1343 ch->ccw[1].count = ch->max_bufsize;
1344
1345 if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1346 dev_kfree_skb_any(ch->trans_skb);
1347 ch->trans_skb = NULL;
1348 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
1349 "%s: %s: IDAL alloc failed",
1350 CTCM_FUNTAIL, ch->id);
1351 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1352 return;
1353 }
1354
1355 CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1356 (void *)(unsigned long)ch->ccw[1].cda,
1357 ch->trans_skb->data);
1358
1359 ch->ccw[1].count = ch->trans_skb->len;
1360 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1361 ch->prof.send_stamp = jiffies;
1362 if (do_debug_ccw)
1363 ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1364 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1365 (unsigned long)ch, 0xff, 0);
1366 ch->prof.doios_multi++;
1367 if (rc != 0) {
1368 priv->stats.tx_dropped += i;
1369 priv->stats.tx_errors += i;
1370 fsm_deltimer(&ch->timer);
1371 ctcm_ccw_check_rc(ch, rc, "chained TX");
1372 }
1373done:
1374 ctcm_clear_busy(dev);
1375 return;
1376}
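/*
 * Layout of the buffer assembled above from the collect queue, as handed
 * to ccw_device_start(); the per-packet PDU headers were already prepared
 * by the transmit path, only their flags are adjusted here:
 *
 *	+-----------+-------------+-------------+-----+-------------+
 *	| TH header | PDU1 + data | PDU2 + data | ... | PDUn + data |
 *	+-----------+-------------+-------------+-----+-------------+
 *
 * The last PDU header gets PDU_LAST set, and the TH header carries the
 * incremented ch->th_seq_num so the peer can track the write sequence.
 */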
1377
1378/**
1379 * Got normal data, check it for sanity, queue it up, allocate a new buffer,
1380 * trigger the bottom half, and initiate the next read.
1381 *
1382 * fi An instance of a channel statemachine.
1383 * event The event that just happened.
1384 * arg Generic pointer, cast from channel * upon call.
1385 */
1386static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1387{
1388 struct channel *ch = arg;
1389 struct net_device *dev = ch->netdev;
1390 struct ctcm_priv *priv = dev->ml_priv;
1391 struct mpc_group *grp = priv->mpcg;
1392 struct sk_buff *skb = ch->trans_skb;
1393 struct sk_buff *new_skb;
1394 unsigned long saveflags = 0; /* avoids compiler warning */
1395 int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
1396
1397 CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
1398 CTCM_FUNTAIL, dev->name, smp_processor_id(),
1399 ch->id, ch->max_bufsize, len);
1400 fsm_deltimer(&ch->timer);
1401
1402 if (skb == NULL) {
1403 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1404 "%s(%s): TRANS_SKB = NULL",
1405 CTCM_FUNTAIL, dev->name);
1406 goto again;
1407 }
1408
1409 if (len < TH_HEADER_LENGTH) {
1410 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1411 "%s(%s): packet length %d to short",
1412 CTCM_FUNTAIL, dev->name, len);
1413 priv->stats.rx_dropped++;
1414 priv->stats.rx_length_errors++;
1415 } else {
1416 /* must have valid th header or game over */
1417 __u32 block_len = len;
1418 len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1419 new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1420
1421 if (new_skb == NULL) {
1422 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1423 "%s(%d): skb allocation failed",
1424 CTCM_FUNTAIL, dev->name);
1425 fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1426 goto again;
1427 }
1428 switch (fsm_getstate(grp->fsm)) {
1429 case MPCG_STATE_RESET:
1430 case MPCG_STATE_INOP:
1431 dev_kfree_skb_any(new_skb);
1432 break;
1433 case MPCG_STATE_FLOWC:
1434 case MPCG_STATE_READY:
1435 skb_put_data(new_skb, skb->data, block_len);
1436 skb_queue_tail(&ch->io_queue, new_skb);
1437 tasklet_schedule(&ch->ch_tasklet);
1438 break;
1439 default:
1440 skb_put_data(new_skb, skb->data, len);
1441 skb_queue_tail(&ch->io_queue, new_skb);
1442 tasklet_hi_schedule(&ch->ch_tasklet);
1443 break;
1444 }
1445 }
1446
1447again:
1448 switch (fsm_getstate(grp->fsm)) {
1449 int rc, dolock;
1450 case MPCG_STATE_FLOWC:
1451 case MPCG_STATE_READY:
1452 if (ctcm_checkalloc_buffer(ch))
1453 break;
1454 ch->trans_skb->data = ch->trans_skb_data;
1455 skb_reset_tail_pointer(ch->trans_skb);
1456 ch->trans_skb->len = 0;
1457 ch->ccw[1].count = ch->max_bufsize;
1458 if (do_debug_ccw)
1459 ctcmpc_dumpit((char *)&ch->ccw[0],
1460 sizeof(struct ccw1) * 3);
1461 dolock = !in_irq();
1462 if (dolock)
1463 spin_lock_irqsave(
1464 get_ccwdev_lock(ch->cdev), saveflags);
1465 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1466 (unsigned long)ch, 0xff, 0);
1467 if (dolock) /* see remark about conditional locking */
1468 spin_unlock_irqrestore(
1469 get_ccwdev_lock(ch->cdev), saveflags);
1470 if (rc != 0)
1471 ctcm_ccw_check_rc(ch, rc, "normal RX");
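 /* fall through */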
1472 default:
1473 break;
1474 }
1475
1476 CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
1477 __func__, dev->name, ch, ch->id);
1478
1479}
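/*
 * Re-arming the read channel is deliberately gated on the MPC group
 * state: only in MPCG_STATE_FLOWC/READY is trans_skb reset and the read
 * channel program restarted at ch->ccw[0]; in every other group state the
 * buffer is left alone and the group statemachine decides how to proceed.
 */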
1480
1481/**
1482 * Handle the first I/O after channel setup (MPC variant).
1483 *
1484 * fi An instance of a channel statemachine.
1485 * event The event that just happened.
1486 * arg Generic pointer, cast from channel * upon call.
1487 */
1488static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1489{
1490 struct channel *ch = arg;
1491 struct net_device *dev = ch->netdev;
1492 struct ctcm_priv *priv = dev->ml_priv;
1493 struct mpc_group *gptr = priv->mpcg;
1494
1495 CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
1496 __func__, ch->id, ch);
1497
1498 CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
1499 "%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
1500 CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
1501 fsm_getstate(gptr->fsm), ch->protocol);
1502
1503 if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1504 MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1505
1506 fsm_deltimer(&ch->timer);
1507 if (ctcm_checkalloc_buffer(ch))
1508 goto done;
1509
1510 switch (fsm_getstate(fi)) {
1511 case CTC_STATE_STARTRETRY:
1512 case CTC_STATE_SETUPWAIT:
1513 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
1514 ctcmpc_chx_rxidle(fi, event, arg);
1515 } else {
1516 fsm_newstate(fi, CTC_STATE_TXIDLE);
1517 fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1518 }
1519 goto done;
1520 default:
1521 break;
1522 }
1523
1524 fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1525 ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1526
1527done:
1528 CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
1529 __func__, ch->id, ch);
1530 return;
1531}
1532
1533/**
1534 * Got initial data, check it. If OK,
1535 * notify device statemachine that we are up and
1536 * running.
1537 *
1538 * fi An instance of a channel statemachine.
1539 * event The event that just happened.
1540 * arg Generic pointer, cast from channel * upon call.
1541 */
1542void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1543{
1544 struct channel *ch = arg;
1545 struct net_device *dev = ch->netdev;
1546 struct ctcm_priv *priv = dev->ml_priv;
1547 struct mpc_group *grp = priv->mpcg;
1548 int rc;
1549 unsigned long saveflags = 0; /* avoids compiler warning */
1550
1551 fsm_deltimer(&ch->timer);
1552 CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
1553 __func__, ch->id, dev->name, smp_processor_id(),
1554 fsm_getstate(fi), fsm_getstate(grp->fsm));
1555
1556 fsm_newstate(fi, CTC_STATE_RXIDLE);
1557 /* XID processing complete */
1558
1559 switch (fsm_getstate(grp->fsm)) {
1560 case MPCG_STATE_FLOWC:
1561 case MPCG_STATE_READY:
1562 if (ctcm_checkalloc_buffer(ch))
1563 goto done;
1564 ch->trans_skb->data = ch->trans_skb_data;
1565 skb_reset_tail_pointer(ch->trans_skb);
1566 ch->trans_skb->len = 0;
1567 ch->ccw[1].count = ch->max_bufsize;
1568 CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1569 if (event == CTC_EVENT_START)
1570 /* see remark about conditional locking */
1571 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1572 rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1573 (unsigned long)ch, 0xff, 0);
1574 if (event == CTC_EVENT_START)
1575 spin_unlock_irqrestore(
1576 get_ccwdev_lock(ch->cdev), saveflags);
1577 if (rc != 0) {
1578 fsm_newstate(fi, CTC_STATE_RXINIT);
1579 ctcm_ccw_check_rc(ch, rc, "initial RX");
1580 goto done;
1581 }
1582 break;
1583 default:
1584 break;
1585 }
1586
1587 fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1588done:
1589 return;
1590}
1591
1592/*
1593 * ctcmpc channel FSM action
1594 * called from several points in ctcmpc_ch_fsm
1595 * ctcmpc only
1596 */
1597static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1598{
1599 struct channel *ch = arg;
1600 struct net_device *dev = ch->netdev;
1601 struct ctcm_priv *priv = dev->ml_priv;
1602 struct mpc_group *grp = priv->mpcg;
1603
1604 CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
1605 __func__, dev->name, ch->id, ch, smp_processor_id(),
1606 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1607
1608 switch (fsm_getstate(grp->fsm)) {
1609 case MPCG_STATE_XID2INITW:
1610 /* ok..start yside xid exchanges */
1611 if (!ch->in_mpcgroup)
1612 break;
1613 if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
1614 fsm_deltimer(&grp->timer);
1615 fsm_addtimer(&grp->timer,
1616 MPC_XID_TIMEOUT_VALUE,
1617 MPCG_EVENT_TIMER, dev);
1618 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1619
1620 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1621 /* attn rcvd before xid0 processed via bh */
1622 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1623 break;
1624 case MPCG_STATE_XID2INITX:
1625 case MPCG_STATE_XID0IOWAIT:
1626 case MPCG_STATE_XID0IOWAIX:
1627 /* attn rcvd before xid0 processed on ch
1628 but mid-xid0 processing for group */
1629 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1630 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1631 break;
1632 case MPCG_STATE_XID7INITW:
1633 case MPCG_STATE_XID7INITX:
1634 case MPCG_STATE_XID7INITI:
1635 case MPCG_STATE_XID7INITZ:
1636 switch (fsm_getstate(ch->fsm)) {
1637 case CH_XID7_PENDING:
1638 fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1639 break;
1640 case CH_XID7_PENDING2:
1641 fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1642 break;
1643 }
1644 fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
1645 break;
1646 }
1647
1648 return;
1649}
1650
1651/*
1652 * ctcmpc channel FSM action
1653 * called from one point in ctcmpc_ch_fsm
1654 * ctcmpc only
1655 */
1656static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1657{
1658 struct channel *ch = arg;
1659 struct net_device *dev = ch->netdev;
1660 struct ctcm_priv *priv = dev->ml_priv;
1661 struct mpc_group *grp = priv->mpcg;
1662
1663 CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
1664 __func__, dev->name, ch->id,
1665 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1666
1667 fsm_deltimer(&ch->timer);
1668
1669 switch (fsm_getstate(grp->fsm)) {
1670 case MPCG_STATE_XID0IOWAIT:
1671 /* vtam wants to be primary. Start yside xid exchanges */
1672 /* only receive one attn-busy at a time so must not */
1673 /* change state each time */
1674 grp->changed_side = 1;
1675 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1676 break;
1677 case MPCG_STATE_XID2INITW:
1678 if (grp->changed_side == 1) {
1679 grp->changed_side = 2;
1680 break;
1681 }
1682 /* process began via call to establish_conn */
1683 /* so must report failure instead of reverting */
1684 /* back to ready-for-xid passive state */
1685 if (grp->estconnfunc)
1686 goto done;
1687 /* this attnbusy is NOT the result of xside xid */
1688 /* collisions so yside must have been triggered */
1689 /* by an ATTN that was not intended to start XID */
1690 /* processing. Revert back to ready-for-xid and */
1691 /* wait for ATTN interrupt to signal xid start */
1692 if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
1693 fsm_newstate(ch->fsm, CH_XID0_PENDING) ;
1694 fsm_deltimer(&grp->timer);
1695 goto done;
1696 }
1697 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1698 goto done;
1699 case MPCG_STATE_XID2INITX:
1700 /* XID2 was received before ATTN Busy for the second
1701 channel. Send yside xid for the second channel.
1702 */
1703 if (grp->changed_side == 1) {
1704 grp->changed_side = 2;
1705 break;
1706 }
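 /* fall through */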
1707 case MPCG_STATE_XID0IOWAIX:
1708 case MPCG_STATE_XID7INITW:
1709 case MPCG_STATE_XID7INITX:
1710 case MPCG_STATE_XID7INITI:
1711 case MPCG_STATE_XID7INITZ:
1712 default:
1713 /* multiple attn-busy indicates we are badly out of sync */
1714 /* and they are certainly not being received as part */
1715 /* of valid mpc group negotiations */
1716 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1717 goto done;
1718 }
1719
1720 if (grp->changed_side == 1) {
1721 fsm_deltimer(&grp->timer);
1722 fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
1723 MPCG_EVENT_TIMER, dev);
1724 }
1725 if (ch->in_mpcgroup)
1726 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1727 else
1728 CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1729 "%s(%s): channel %s not added to group",
1730 CTCM_FUNTAIL, dev->name, ch->id);
1731
1732done:
1733 return;
1734}
1735
1736/*
1737 * ctcmpc channel FSM action
1738 * called from several points in ctcmpc_ch_fsm
1739 * ctcmpc only
1740 */
1741static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1742{
1743 struct channel *ch = arg;
1744 struct net_device *dev = ch->netdev;
1745 struct ctcm_priv *priv = dev->ml_priv;
1746 struct mpc_group *grp = priv->mpcg;
1747
1748 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1749 return;
1750}
1751
1752/*
1753 * ctcmpc channel FSM action
1754 * called from several points in ctcmpc_ch_fsm
1755 * ctcmpc only
1756 */
1757static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1758{
1759 struct channel *ach = arg;
1760 struct net_device *dev = ach->netdev;
1761 struct ctcm_priv *priv = dev->ml_priv;
1762 struct mpc_group *grp = priv->mpcg;
1763 struct channel *wch = priv->channel[CTCM_WRITE];
1764 struct channel *rch = priv->channel[CTCM_READ];
1765 struct sk_buff *skb;
1766 struct th_sweep *header;
1767 int rc = 0;
1768 unsigned long saveflags = 0;
1769
1770 CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1771 __func__, smp_processor_id(), ach, ach->id);
1772
1773 if (grp->in_sweep == 0)
1774 goto done;
1775
1776 CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
1777 __func__, wch->th_seq_num);
1778 CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
1779 __func__, rch->th_seq_num);
1780
1781 if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1782 /* give the previous IO time to complete */
1783 fsm_addtimer(&wch->sweep_timer,
1784 200, CTC_EVENT_RSWEEP_TIMER, wch);
1785 goto done;
1786 }
1787
1788 skb = skb_dequeue(&wch->sweep_queue);
1789 if (!skb)
1790 goto done;
1791
1792 if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1793 grp->in_sweep = 0;
1794 ctcm_clear_busy_do(dev);
1795 dev_kfree_skb_any(skb);
1796 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1797 goto done;
1798 } else {
1799 refcount_inc(&skb->users);
1800 skb_queue_tail(&wch->io_queue, skb);
1801 }
1802
1803 /* send out the sweep */
1804 wch->ccw[4].count = skb->len;
1805
1806 header = (struct th_sweep *)skb->data;
1807 switch (header->th.th_ch_flag) {
1808 case TH_SWEEP_REQ:
1809 grp->sweep_req_pend_num--;
1810 break;
1811 case TH_SWEEP_RESP:
1812 grp->sweep_rsp_pend_num--;
1813 break;
1814 }
1815
1816 header->sw.th_last_seq = wch->th_seq_num;
1817
1818 CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1819 CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
1820 CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
1821
1822 fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1823 fsm_newstate(wch->fsm, CTC_STATE_TX);
1824
1825 spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1826 wch->prof.send_stamp = jiffies;
1827 rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1828 (unsigned long) wch, 0xff, 0);
1829 spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1830
1831 if ((grp->sweep_req_pend_num == 0) &&
1832 (grp->sweep_rsp_pend_num == 0)) {
1833 grp->in_sweep = 0;
1834 rch->th_seq_num = 0x00;
1835 wch->th_seq_num = 0x00;
1836 ctcm_clear_busy_do(dev);
1837 }
1838
1839 CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
1840 __func__, wch->th_seq_num, rch->th_seq_num);
1841
1842 if (rc != 0)
1843 ctcm_ccw_check_rc(wch, rc, "send sweep");
1844
1845done:
1846 return;
1847}
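/*
 * Sweep bookkeeping in ctcmpc_chx_send_sweep(): each queued sweep frame
 * is either a TH_SWEEP_REQ or a TH_SWEEP_RESP and decrements the matching
 * pending counter in the MPC group, and it carries the write channel's
 * current th_seq_num in sw.th_last_seq.  Once both counters reach zero
 * the sweep is finished, both sequence counters are reset and the device
 * busy flag is cleared.
 */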
1848
1849
1850/*
1851 * The ctcmpc statemachine for a channel.
1852 */
1853
1854const fsm_node ctcmpc_ch_fsm[] = {
1855 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
1856 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
1857 { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1858 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
1859 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1860
1861 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
1862 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
1863 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
1864 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
1865 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
1866 { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
1867 { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
1868 { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1869
1870 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1871 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
1872 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1873 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
1874 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1875 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1876
1877 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
1878 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
1879 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
1880 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1881 { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1882
1883 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1884 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
1885 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
1886 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1887 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1888 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
1889 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1890 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1891
1892 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
1893 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
1894 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
1895 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
1896 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
1897 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
1898 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
1899 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1900 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
1901 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1902
1903 { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
1904 { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1905 { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1906 { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
1907 { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1908 { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1909 { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1910 { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1912 { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1913
1914 { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1915 { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1916 { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
1917 { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
1918 { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1919 { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1920 { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1921 { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1922 { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
1923 { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1924 { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1925
1926 { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1927 { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1928 { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
1929 { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
1930 { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1931 { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1932 { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1933 { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1934 { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1936 { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1937 { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1938 { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1939
1940 { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1941 { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1942 { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
1943 { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
1944 { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1945 { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1946 { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1947 { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1948 { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1949 { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1950 { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1951 { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1952
1953 { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1954 { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1955 { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
1956 { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
1957 { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1958 { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1959 { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1960 { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1961 { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1962 { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1963 { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1964 { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1965
1966 { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1967 { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1968 { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
1969 { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
1970 { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1971 { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1972 { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1973 { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1974 { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1975 { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1976 { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1977 { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1978
1979 { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1980 { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
1981 { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
1982 { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
1983 { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1984 { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1985 { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
1986 { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
1987 { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
1988 { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
1989 { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
1990 { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
1991
1992 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
1993 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
1994 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
1995 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
1996 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
1997 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
1998 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
1999 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
2000
2001 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
2002 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
2003 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
2004 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
2005 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
2006 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
2007 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2008 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2009 { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2010
2011 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
2012 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
2013 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
2014 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2015 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2016 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2017 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2018 { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2019
2020 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
2021 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
2022 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
2023 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2024 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2025 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2026 { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2027 { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2028
2029 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
2030 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
2031 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
2032 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
2033 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
2034 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2035 { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2036
2037 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
2038 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
2039 { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
2040 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
2041 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
2042 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
2043 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2044 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2045 { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
2046 { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
2047
2048 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2049 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
2050 { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
2051 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2052 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
2053};
2054
2055int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
2056
2057/*
2058 * Actions for the interface statemachine.
2059 */
2060
2061/**
2062 * Start up channels by sending CTC_EVENT_START to each channel.
2063 *
2064 * fi An instance of an interface statemachine.
2065 * event The event that just happened.
2066 * arg Generic pointer, cast from struct net_device * upon call.
2067 */
2068static void dev_action_start(fsm_instance *fi, int event, void *arg)
2069{
2070 struct net_device *dev = arg;
2071 struct ctcm_priv *priv = dev->ml_priv;
2072 int direction;
2073
2074 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2075
2076 fsm_deltimer(&priv->restart_timer);
2077 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2078 if (IS_MPC(priv))
2079 priv->mpcg->channels_terminating = 0;
2080 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2081 struct channel *ch = priv->channel[direction];
2082 fsm_event(ch->fsm, CTC_EVENT_START, ch);
2083 }
2084}
2085
2086/**
2087 * Shut down channels by sending CTC_EVENT_STOP to each channel.
2088 *
2089 * fi An instance of an interface statemachine.
2090 * event The event that just happened.
2091 * arg Generic pointer, cast from struct net_device * upon call.
2092 */
2093static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2094{
2095 int direction;
2096 struct net_device *dev = arg;
2097 struct ctcm_priv *priv = dev->ml_priv;
2098
2099 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2100
2101 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2102 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2103 struct channel *ch = priv->channel[direction];
2104 fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2105 ch->th_seq_num = 0x00;
2106 CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
2107 __func__, ch->th_seq_num);
2108 }
2109 if (IS_MPC(priv))
2110 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2111}
2112
2113static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2114{
2115 int restart_timer;
2116 struct net_device *dev = arg;
2117 struct ctcm_priv *priv = dev->ml_priv;
2118
2119 CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2120
2121 if (IS_MPC(priv)) {
2122 restart_timer = CTCM_TIME_1_SEC;
2123 } else {
2124 restart_timer = CTCM_TIME_5_SEC;
2125 }
2126 dev_info(&dev->dev, "Restarting device\n");
2127
2128 dev_action_stop(fi, event, arg);
2129 fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2130 if (IS_MPC(priv))
2131 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2132
2133 /* going back into start sequence too quickly can */
2134 /* result in the other side becoming unreachable due */
2135 /* to sense reported when IO is aborted */
2136 fsm_addtimer(&priv->restart_timer, restart_timer,
2137 DEV_EVENT_START, dev);
2138}
2139
2140/**
2141 * Called from channel statemachine
2142 * when a channel is up and running.
2143 *
2144 * fi An instance of an interface statemachine.
2145 * event The event that just happened.
2146 * arg Generic pointer, cast from struct net_device * upon call.
2147 */
2148static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2149{
2150 struct net_device *dev = arg;
2151 struct ctcm_priv *priv = dev->ml_priv;
2152 int dev_stat = fsm_getstate(fi);
2153
2154 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
2155 "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
2156 dev->name, dev->ml_priv, dev_stat, event);
2157
2158 switch (fsm_getstate(fi)) {
2159 case DEV_STATE_STARTWAIT_RXTX:
2160 if (event == DEV_EVENT_RXUP)
2161 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2162 else
2163 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2164 break;
2165 case DEV_STATE_STARTWAIT_RX:
2166 if (event == DEV_EVENT_RXUP) {
2167 fsm_newstate(fi, DEV_STATE_RUNNING);
2168 dev_info(&dev->dev,
2169 "Connected with remote side\n");
2170 ctcm_clear_busy(dev);
2171 }
2172 break;
2173 case DEV_STATE_STARTWAIT_TX:
2174 if (event == DEV_EVENT_TXUP) {
2175 fsm_newstate(fi, DEV_STATE_RUNNING);
2176 dev_info(&dev->dev,
2177 "Connected with remote side\n");
2178 ctcm_clear_busy(dev);
2179 }
2180 break;
2181 case DEV_STATE_STOPWAIT_TX:
2182 if (event == DEV_EVENT_RXUP)
2183 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2184 break;
2185 case DEV_STATE_STOPWAIT_RX:
2186 if (event == DEV_EVENT_TXUP)
2187 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2188 break;
2189 }
2190
2191 if (IS_MPC(priv)) {
2192 if (event == DEV_EVENT_RXUP)
2193 mpc_channel_action(priv->channel[CTCM_READ],
2194 CTCM_READ, MPC_CHANNEL_ADD);
2195 else
2196 mpc_channel_action(priv->channel[CTCM_WRITE],
2197 CTCM_WRITE, MPC_CHANNEL_ADD);
2198 }
2199}
2200
2201/**
2202 * Called from device statemachine
2203 * when a channel has been shut down.
2204 *
2205 * fi An instance of an interface statemachine.
2206 * event The event that just happened.
2207 * arg Generic pointer, cast from struct net_device * upon call.
2208 */
2209static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2210{
2211
2212 struct net_device *dev = arg;
2213 struct ctcm_priv *priv = dev->ml_priv;
2214
2215 CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2216
2217 switch (fsm_getstate(fi)) {
2218 case DEV_STATE_RUNNING:
2219 if (event == DEV_EVENT_TXDOWN)
2220 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2221 else
2222 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2223 break;
2224 case DEV_STATE_STARTWAIT_RX:
2225 if (event == DEV_EVENT_TXDOWN)
2226 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2227 break;
2228 case DEV_STATE_STARTWAIT_TX:
2229 if (event == DEV_EVENT_RXDOWN)
2230 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2231 break;
2232 case DEV_STATE_STOPWAIT_RXTX:
2233 if (event == DEV_EVENT_TXDOWN)
2234 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2235 else
2236 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2237 break;
2238 case DEV_STATE_STOPWAIT_RX:
2239 if (event == DEV_EVENT_RXDOWN)
2240 fsm_newstate(fi, DEV_STATE_STOPPED);
2241 break;
2242 case DEV_STATE_STOPWAIT_TX:
2243 if (event == DEV_EVENT_TXDOWN)
2244 fsm_newstate(fi, DEV_STATE_STOPPED);
2245 break;
2246 }
2247 if (IS_MPC(priv)) {
2248 if (event == DEV_EVENT_RXDOWN)
2249 mpc_channel_action(priv->channel[CTCM_READ],
2250 CTCM_READ, MPC_CHANNEL_REMOVE);
2251 else
2252 mpc_channel_action(priv->channel[CTCM_WRITE],
2253 CTCM_WRITE, MPC_CHANNEL_REMOVE);
2254 }
2255}
2256
2257const fsm_node dev_fsm[] = {
2258 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
2259 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
2260 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2261 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2262 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2263 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
2264 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2265 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2266 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
2267 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2268 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
2269 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2270 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2271 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
2272 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2273 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
2274 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
2275 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
2276 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
2277 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
2278 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2279 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
2280 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
2281 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
2282 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
2283 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
2284 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
2285 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
2286 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
2287 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
2288 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
2289 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
2290 { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
2291 { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
2292 { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
2293 { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
2294 { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
2295};
2296
2297int dev_fsm_len = ARRAY_SIZE(dev_fsm);
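/*
 * Taken together, the channel statemachines above feed this device
 * statemachine: each channel posts DEV_EVENT_RXUP/TXUP or
 * DEV_EVENT_RXDOWN/TXDOWN to priv->fsm, and dev_fsm[] combines them into
 * one interface state.  A typical bring-up, sketched with the events used
 * in this file (illustrative sequence only):
 *
 *	fsm_event(priv->fsm, DEV_EVENT_START, dev);	// dev_action_start
 *	// read channel comes up first:
 *	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);	// -> STARTWAIT_TX
 *	// write channel follows:
 *	fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);	// -> RUNNING
 */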
2298
2299/* --- This is the END my friend --- */
2300