/*
 * drivers/s390/net/ctcm_fsms.c
 *
 * Copyright IBM Corp. 2001, 2007
 * Authors:	Fritz Elfert (felfert@millenux.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 *	MPC additions :
 *		Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"
#include "cu3088.h"

#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"

const char *dev_state_names[] = {
	[DEV_STATE_STOPPED] = "Stopped",
	[DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
	[DEV_STATE_STARTWAIT_RX] = "StartWait RX",
	[DEV_STATE_STARTWAIT_TX] = "StartWait TX",
	[DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
	[DEV_STATE_STOPWAIT_RX] = "StopWait RX",
	[DEV_STATE_STOPWAIT_TX] = "StopWait TX",
	[DEV_STATE_RUNNING] = "Running",
};

const char *dev_event_names[] = {
	[DEV_EVENT_START] = "Start",
	[DEV_EVENT_STOP] = "Stop",
	[DEV_EVENT_RXUP] = "RX up",
	[DEV_EVENT_TXUP] = "TX up",
	[DEV_EVENT_RXDOWN] = "RX down",
	[DEV_EVENT_TXDOWN] = "TX down",
	[DEV_EVENT_RESTART] = "Restart",
};

const char *ctc_ch_event_names[] = {
	[CTC_EVENT_IO_SUCCESS] = "ccw_device success",
	[CTC_EVENT_IO_EBUSY] = "ccw_device busy",
	[CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
	[CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
	[CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
	[CTC_EVENT_ATTN] = "Status ATTN",
	[CTC_EVENT_BUSY] = "Status BUSY",
	[CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
	[CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
	[CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
	[CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
	[CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
	[CTC_EVENT_UC_ZERO] = "Unit check ZERO",
	[CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
	[CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
	[CTC_EVENT_MC_FAIL] = "Machine check failure",
	[CTC_EVENT_MC_GOOD] = "Machine check operational",
	[CTC_EVENT_IRQ] = "IRQ normal",
	[CTC_EVENT_FINSTAT] = "IRQ final",
	[CTC_EVENT_TIMER] = "Timer",
	[CTC_EVENT_START] = "Start",
	[CTC_EVENT_STOP] = "Stop",
	/*
	 * additional MPC events
	 */
	[CTC_EVENT_SEND_XID] = "XID Exchange",
	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};

const char *ctc_ch_state_names[] = {
	[CTC_STATE_IDLE] = "Idle",
	[CTC_STATE_STOPPED] = "Stopped",
	[CTC_STATE_STARTWAIT] = "StartWait",
	[CTC_STATE_STARTRETRY] = "StartRetry",
	[CTC_STATE_SETUPWAIT] = "SetupWait",
	[CTC_STATE_RXINIT] = "RX init",
	[CTC_STATE_TXINIT] = "TX init",
	[CTC_STATE_RX] = "RX",
	[CTC_STATE_TX] = "TX",
	[CTC_STATE_RXIDLE] = "RX idle",
	[CTC_STATE_TXIDLE] = "TX idle",
	[CTC_STATE_RXERR] = "RX error",
	[CTC_STATE_TXERR] = "TX error",
	[CTC_STATE_TERM] =
"Terminating", 117 [CTC_STATE_DTERM] = "Restarting", 118 [CTC_STATE_NOTOP] = "Not operational", 119 /* 120 * additional MPC states 121 */ 122 [CH_XID0_PENDING] = "Pending XID0 Start", 123 [CH_XID0_INPROGRESS] = "In XID0 Negotiations ", 124 [CH_XID7_PENDING] = "Pending XID7 P1 Start", 125 [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ", 126 [CH_XID7_PENDING2] = "Pending XID7 P2 Start ", 127 [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ", 128 [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ", 129 }; 130 131 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg); 132 133 /* 134 * ----- static ctcm actions for channel statemachine ----- 135 * 136 */ 137 static void chx_txdone(fsm_instance *fi, int event, void *arg); 138 static void chx_rx(fsm_instance *fi, int event, void *arg); 139 static void chx_rxidle(fsm_instance *fi, int event, void *arg); 140 static void chx_firstio(fsm_instance *fi, int event, void *arg); 141 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg); 142 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg); 143 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg); 144 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg); 145 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg); 146 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg); 147 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg); 148 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg); 149 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg); 150 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg); 151 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg); 152 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg); 153 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg); 154 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg); 155 156 /* 157 * ----- static ctcmpc actions for ctcmpc channel statemachine ----- 158 * 159 */ 160 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg); 161 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg); 162 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg); 163 /* shared : 164 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg); 165 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg); 166 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg); 167 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg); 168 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg); 169 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg); 170 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg); 171 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg); 172 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg); 173 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg); 174 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg); 175 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg); 176 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg); 177 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg); 178 */ 179 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg); 180 static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *); 181 static void ctcmpc_chx_resend(fsm_instance *, int, void *); 
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);

/**
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * ch	:	The channel, the error belongs to.
 * Returns the error code (!= 0) to inspect.
 */
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"ccw error %s (%s): %04x\n", ch->id, msg, rc);
	switch (rc) {
	case -EBUSY:
		ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg);
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		ctcm_pr_emerg("%s (%s): Invalid device called for IO\n",
				ch->id, msg);
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
				ch->id, msg, rc);
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}

void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/**
 * NOP action for statemachines
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions for channel - statemachines.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	duration =
	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.count != 0)
		ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
				dev->name, ch->irb->scsw.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = current_kernel_time(); /* xtime */
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}

/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}

/**
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	int len = ch->max_bufsize - ch->irb->scsw.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	if (len < 8) {
		ctcm_pr_debug("%s: got packet with length %d < 8\n",
				dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		ctcm_pr_debug("%s: got packet with length %d > %d\n",
				dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		ctcm_pr_debug("%s: got block length %d != rx length %d\n",
				dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	block_len -= 2;
	if (block_len > 0) {
		*((__u16 *)skb->data) = block_len;
		ctcm_unpack_skb(ch, skb);
	}
again:
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;

	CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		ctcm_pr_debug("%s: remote side issued READ?, init.\n", ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}

	/*
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
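 *
 * The 2-byte value received during the handshake is the initial block
 * length announced by the peer. It is accepted only if it is at least
 * CTCM_INITIAL_BLOCKLEN; otherwise the handshake is retried via
 * chx_firstio().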
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	__u16 buflen;
	int rc;

	CTCM_DBF_TEXT(TRACE, 6, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *)ch->trans_skb->data);
	if (do_debug)
		ctcm_pr_debug("%s: Initial RX count %d\n", dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		if (do_debug)
			ctcm_pr_debug("%s: Initial RX count %d not %d\n",
				dev->name, buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}

/**
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		timeout = 1500;
		if (do_debug)
			ctcm_pr_debug("ctcm enter: %s(): cp=%i ch=0x%p id=%s\n",
				__FUNCTION__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	if (do_debug_ccw && IS_MPC(ch))
		ctcmpc_dumpit((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic in
	 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
					(unsigned long)ch, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/**
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	struct net_device *dev;
	unsigned long saveflags;

	CTCM_DBF_TEXT(TRACE, 5, __FUNCTION__);
	if (ch == NULL) {
		ctcm_pr_warn("chx_start ch=NULL\n");
		return;
	}
	if (ch->netdev == NULL) {
		ctcm_pr_warn("chx_start dev=NULL, id=%s\n", ch->id);
		return;
	}
	dev = ch->netdev;

	if (do_debug)
		ctcm_pr_debug("%s: %s channel start\n", dev->name,
			(CHANNEL_DIRECTION(ch->flags) == READ) ?
"RX" : "TX"); 605 606 if (ch->trans_skb != NULL) { 607 clear_normalized_cda(&ch->ccw[1]); 608 dev_kfree_skb(ch->trans_skb); 609 ch->trans_skb = NULL; 610 } 611 if (CHANNEL_DIRECTION(ch->flags) == READ) { 612 ch->ccw[1].cmd_code = CCW_CMD_READ; 613 ch->ccw[1].flags = CCW_FLAG_SLI; 614 ch->ccw[1].count = 0; 615 } else { 616 ch->ccw[1].cmd_code = CCW_CMD_WRITE; 617 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 618 ch->ccw[1].count = 0; 619 } 620 if (ctcm_checkalloc_buffer(ch)) { 621 ctcm_pr_notice("%s: %s trans_skb allocation delayed " 622 "until first transfer\n", dev->name, 623 (CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX"); 624 } 625 626 ch->ccw[0].cmd_code = CCW_CMD_PREPARE; 627 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 628 ch->ccw[0].count = 0; 629 ch->ccw[0].cda = 0; 630 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */ 631 ch->ccw[2].flags = CCW_FLAG_SLI; 632 ch->ccw[2].count = 0; 633 ch->ccw[2].cda = 0; 634 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3); 635 ch->ccw[4].cda = 0; 636 ch->ccw[4].flags &= ~CCW_FLAG_IDA; 637 638 fsm_newstate(fi, CTC_STATE_STARTWAIT); 639 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); 640 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); 641 rc = ccw_device_halt(ch->cdev, (unsigned long)ch); 642 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); 643 if (rc != 0) { 644 if (rc != -EBUSY) 645 fsm_deltimer(&ch->timer); 646 ctcm_ccw_check_rc(ch, rc, "initial HaltIO"); 647 } 648 } 649 650 /** 651 * Shutdown a channel. 652 * 653 * fi An instance of a channel statemachine. 654 * event The event, just happened. 655 * arg Generic pointer, casted from channel * upon call. 656 */ 657 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) 658 { 659 struct channel *ch = arg; 660 unsigned long saveflags = 0; 661 int rc; 662 int oldstate; 663 664 CTCM_DBF_TEXT(TRACE, 2, __FUNCTION__); 665 fsm_deltimer(&ch->timer); 666 if (IS_MPC(ch)) 667 fsm_deltimer(&ch->sweep_timer); 668 669 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); 670 671 if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ 672 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); 673 /* Such conditional locking is undeterministic in 674 * static view. => ignore sparse warnings here. */ 675 oldstate = fsm_getstate(fi); 676 fsm_newstate(fi, CTC_STATE_TERM); 677 rc = ccw_device_halt(ch->cdev, (unsigned long)ch); 678 679 if (event == CTC_EVENT_STOP) 680 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); 681 /* see remark above about conditional locking */ 682 683 if (rc != 0 && rc != -EBUSY) { 684 fsm_deltimer(&ch->timer); 685 if (event != CTC_EVENT_STOP) { 686 fsm_newstate(fi, oldstate); 687 ctcm_ccw_check_rc(ch, rc, (char *)__FUNCTION__); 688 } 689 } 690 } 691 692 /** 693 * Cleanup helper for chx_fail and chx_stopped 694 * cleanup channels queue and notify interface statemachine. 695 * 696 * fi An instance of a channel statemachine. 697 * state The next state (depending on caller). 698 * ch The channel to operate on. 
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * A channel has successfully been halted.
 * Clean up its queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}

/**
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}

/**
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Cleanup queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}

/**
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that remote side isn't setup. In this case
	 * simply retry after some 10 secs...
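	 * (The timer actually used below is CTCM_TIME_5_SEC; when it
	 * fires in CTC_STATE_STARTRETRY the FSM table re-enters
	 * ctcm_chx_setmode(). For non-MPC read channels a halt_IO is
	 * issued first so the pending read gets cancelled.)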
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s : %s error during %s channel setup state=%s\n",
		dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_NOTICE, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctcm_pr_debug("%s: %s channel restart\n", dev->name,
		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is a known problem for
	 * sparse because it's nondeterministic in static view.
	 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}

/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		ctcm_pr_debug("%s: Timeout during RX init handshake\n",
				dev->name);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else
		ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name);
}

/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(SETUP, 3, __FUNCTION__);
	fsm_newstate(fi, CTC_STATE_RXERR);
	ctcm_pr_warn("%s: RX busy. Initialization failed\n", dev->name);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}

/**
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_DEV_NAME(TRACE, dev, "Got remote disconnect, re-initializing");
	fsm_deltimer(&ch->timer);
	if (do_debug)
		ctcm_pr_debug("%s: Got remote disconnect, "
			"re-initializing ...\n", dev->name);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, (unsigned long)ch);
	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
}

/**
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		CTCM_DBF_DEV_NAME(ERROR, dev,
				"Timeout during TX init handshake");
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s : %s error during channel setup state=%s",
			dev->name, ctc_ch_event_names[event],
			fsm_getstate_str(fi));

		ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name);
	}
}

/**
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct sk_buff *skb;

	if (do_debug)
		ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		ctcm_pr_debug("%s: TX retry failed, restarting channel\n",
				dev->name);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready.
		   use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	ctcm_pr_debug("%s: TX retry %d\n", dev->name, ch->retry);
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			ctcm_pr_debug("%s: IDAL alloc failed, chan restart\n",
					dev->name);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER)	/* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because it's nondeterministic in static view.
		 * Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
						(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}

/**
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT(TRACE, 3, __FUNCTION__);
	fsm_deltimer(&ch->timer);
	ctcm_pr_warn("%s %s : unrecoverable channel error\n",
			CTC_DRIVER_NAME, dev->name);
	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}

	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ctcm_pr_debug("%s: RX I/O error\n", dev->name);
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_pr_debug("%s: TX I/O error\n", dev->name);
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * The ctcm statemachine for a channel.
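 *
 * Each fsm_node below is a { state, event, action } triple. A rough
 * sketch of the dispatch (assuming the generic fsm helpers in fsm.h
 * behave as their names suggest):
 *
 *	fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
 *		looks up the entry matching the channel's current state
 *		and CTC_EVENT_FINSTAT in ch_fsm[] and invokes its action,
 *		e.g. chx_txdone(fi, CTC_EVENT_FINSTAT, ch) when the
 *		channel is in CTC_STATE_TX.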
 */
const fsm_node ch_fsm[] = {
	{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
	{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },

	{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },

	{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
	{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },

	{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT,
						chx_firstio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
	{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};

int ch_fsm_len = ARRAY_SIZE(ch_fsm);

/*
 * MPC actions for mpc channel statemachine
 * handling of MPC protocol requires extra
 * statemachine and actions which are prefixed ctcmpc_ .
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
 * which are expanded by some elements.
 */

/*
 * Actions for mpc channel statemachine.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
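 *
 * Unlike the plain CTC variant above, the MPC version rebuilds the
 * transmit buffer from the collect queue with a PDU header per packet,
 * prepends a TH header carrying ch->th_seq_num, and only chains the
 * next write when no sweep is in progress.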
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb;
	int first = 1;
	int i;
	struct timespec done_stamp;
	__u32 data_space;
	unsigned long duration;
	struct sk_buff *peekskb;
	int rc;
	struct th_header *header;
	struct pdu *p_header;

	if (do_debug)
		ctcm_pr_debug("%s cp:%i enter: %s()\n",
			dev->name, smp_processor_id(), __FUNCTION__);

	done_stamp = current_kernel_time(); /* xtime */
	duration = (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000
		+ (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.count != 0)
		ctcm_pr_debug("%s: TX not complete, remaining %d bytes\n",
				dev->name, ch->irb->scsw.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);

	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch)) {
		spin_unlock(&ch->collect_lock);
		goto done;
	}
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
	i = 0;

	if (do_debug_data)
		ctcm_pr_debug("ctcmpc: %s() building "
			"trans_skb from collect_q \n", __FUNCTION__);

	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;

	if (do_debug_data)
		ctcm_pr_debug("ctcmpc: %s() building trans_skb from collect_q"
			" data_space:%04x\n", __FUNCTION__, data_space);
	p_header = NULL;
	while ((skb = skb_dequeue(&ch->collect_queue))) {
		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
		p_header = (struct pdu *)
			(skb_tail_pointer(ch->trans_skb) - skb->len);
		p_header->pdu_flag = 0x00;
		if (skb->protocol == ntohs(ETH_P_SNAP))
			p_header->pdu_flag |= 0x60;
		else
			p_header->pdu_flag |= 0x20;

		if (do_debug_data) {
			ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
				__FUNCTION__, ch->trans_skb->len);
			ctcm_pr_debug("ctcmpc: %s() pdu header and data"
				" for up to 32 bytes sent to vtam\n",
				__FUNCTION__);
			ctcmpc_dumpit((char *)p_header,
				min_t(int, skb->len, 32));
		}
		ch->collect_len -= skb->len;
		data_space -= skb->len;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		if (peekskb->len > data_space)
			break;
		i++;
	}
	/* p_header points to the last one we handled */
	if (p_header)
		p_header->pdu_flag |= PDU_LAST;	/* Say it's the last one */
	header =
		kzalloc(TH_HEADER_LENGTH, gfp_type());

	if (!header) {
		printk(KERN_WARNING "ctcmpc: OUT OF MEMORY IN %s()"
			": Data Lost \n", __FUNCTION__);
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	header->th_ch_flag = TH_HAS_PDU;	/* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	if (do_debug_data)
		ctcm_pr_debug("%s: ToVTAM_th_seq= %08x\n" ,
			__FUNCTION__, ch->th_seq_num);

	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
		TH_HEADER_LENGTH);	/* put the TH on the packet */

	kfree(header);

	if (do_debug_data) {
		ctcm_pr_debug("ctcmpc: %s()trans_skb len:%04x \n",
			__FUNCTION__, ch->trans_skb->len);

		ctcm_pr_debug("ctcmpc: %s() up-to-50 bytes of trans_skb "
			"data to vtam from collect_q\n", __FUNCTION__);
		ctcmpc_dumpit((char *)ch->trans_skb->data,
				min_t(int, ch->trans_skb->len, 50));
	}

	spin_unlock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[1]);
	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
		printk(KERN_WARNING
			"ctcmpc: %s()CCW failure - data lost\n",
			__FUNCTION__);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		return;
	}
	ch->ccw[1].count = ch->trans_skb->len;
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	ch->prof.doios_multi++;
	if (rc != 0) {
		priv->stats.tx_dropped += i;
		priv->stats.tx_errors += i;
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "chained TX");
	}
done:
	ctcm_clear_busy(dev);
	ctcm_pr_debug("ctcmpc exit: %s %s()\n", dev->name, __FUNCTION__);
	return;
}

/**
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
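 *
 * In the MPC variant the received block is copied into a freshly
 * allocated skb and queued on ch->io_queue; the channel tasklet
 * (ch_tasklet) does the actual unpacking, and the next read is only
 * restarted while the MPC group is in FLOWC or READY state.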
 */
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb = ch->trans_skb;
	struct sk_buff *new_skb;
	unsigned long saveflags = 0;	/* avoids compiler warning */
	int len = ch->max_bufsize - ch->irb->scsw.count;

	if (do_debug_data) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx %s cp:%i %s\n",
				dev->name, smp_processor_id(), ch->id);
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "mpc_ch_rx: maxbuf: %04x "
				"len: %04x\n", ch->max_bufsize, len);
	}
	fsm_deltimer(&ch->timer);

	if (skb == NULL) {
		ctcm_pr_debug("ctcmpc exit: %s() TRANS_SKB = NULL \n",
			__FUNCTION__);
		goto again;
	}

	if (len < TH_HEADER_LENGTH) {
		ctcm_pr_info("%s: got packet with invalid length %d\n",
				dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
	} else {
		/* must have valid th header or game over */
		__u32 block_len = len;
		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);

		if (new_skb == NULL) {
			printk(KERN_INFO "ctcmpc:%s() NEW_SKB = NULL\n",
				__FUNCTION__);
			printk(KERN_WARNING "ctcmpc: %s() MEMORY ALLOC FAILED"
				" - DATA LOST - MPC FAILED\n",
				__FUNCTION__);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto again;
		}
		switch (fsm_getstate(grp->fsm)) {
		case MPCG_STATE_RESET:
		case MPCG_STATE_INOP:
			dev_kfree_skb_any(new_skb);
			break;
		case MPCG_STATE_FLOWC:
		case MPCG_STATE_READY:
			memcpy(skb_put(new_skb, block_len),
					skb->data, block_len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_schedule(&ch->ch_tasklet);
			break;
		default:
			memcpy(skb_put(new_skb, len), skb->data, len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_hi_schedule(&ch->ch_tasklet);
			break;
		}
	}

again:
	switch (fsm_getstate(grp->fsm)) {
	int rc, dolock;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			break;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		dolock = !in_irq();
		if (dolock)
			spin_lock_irqsave(
				get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (dolock) /* see remark about conditional locking */
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0)
			ctcm_ccw_check_rc(ch, rc, "normal RX");
	default:
		break;
	}

	if (do_debug)
		ctcm_pr_debug("ctcmpc exit : %s %s(): ch=0x%p id=%s\n",
			dev->name, __FUNCTION__, ch, ch->id);

}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
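 *
 * For the MPC variant no CCW I/O is started here; depending on the
 * channel state the function either hands over to ctcmpc_chx_rxidle()
 * or just moves the channel to CTC_STATE_TXIDLE / CTC_STATE_RXINIT /
 * CTC_STATE_TXINIT, since the initial exchange is driven by the MPC
 * group statemachine.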
 */
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	if (do_debug) {
		struct mpc_group *gptr = priv->mpcg;
		ctcm_pr_debug("ctcmpc enter: %s(): ch=0x%p id=%s\n",
				__FUNCTION__, ch, ch->id);
		ctcm_pr_debug("%s() %s chstate:%i grpstate:%i chprotocol:%i\n",
				__FUNCTION__, ch->id, fsm_getstate(fi),
				fsm_getstate(gptr->fsm), ch->protocol);
	}
	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");

	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		goto done;

	switch (fsm_getstate(fi)) {
	case CTC_STATE_STARTRETRY:
	case CTC_STATE_SETUPWAIT:
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			ctcmpc_chx_rxidle(fi, event, arg);
		} else {
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		goto done;
	default:
		break;
	};

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);

done:
	if (do_debug)
		ctcm_pr_debug("ctcmpc exit : %s(): ch=0x%p id=%s\n",
			__FUNCTION__, ch, ch->id);
	return;
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	fsm_deltimer(&ch->timer);
	ctcm_pr_debug("%s cp:%i enter: %s()\n",
			dev->name, smp_processor_id(), __FUNCTION__);
	if (do_debug)
		ctcm_pr_debug("%s() %s chstate:%i grpstate:%i\n",
			__FUNCTION__, ch->id,
			fsm_getstate(fi), fsm_getstate(grp->fsm));

	fsm_newstate(fi, CTC_STATE_RXIDLE);
	/* XID processing complete */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			goto done;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		if (event == CTC_EVENT_START)
			/* see remark about conditional locking */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_START)
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
			goto done;
		}
		break;
	default:
		break;
	}

	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
	if (do_debug)
		ctcm_pr_debug("ctcmpc exit: %s %s()\n",
			dev->name, __FUNCTION__);
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void
ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	if (do_debug) {
		ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s"
			"GrpState:%s ChState:%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id,
			fsm_getstate_str(grp->fsm),
			fsm_getstate_str(ch->fsm));
	}

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* ok..start yside xid exchanges */
		if (!ch->in_mpcgroup)
			break;
		if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
			fsm_deltimer(&grp->timer);
			fsm_addtimer(&grp->timer,
				MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);

		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* attn rcvd before xid0 processed via bh */
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
		/* attn rcvd before xid0 processed on ch
		   but mid-xid0 processing for group */
		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
		switch (fsm_getstate(ch->fsm)) {
		case CH_XID7_PENDING:
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
			break;
		case CH_XID7_PENDING2:
			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
			break;
		}
		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
		break;
	}

	if (do_debug)
		ctcm_pr_debug("ctcmpc exit : %s(): cp=%i ch=0x%p id=%s\n",
			__FUNCTION__, smp_processor_id(), ch, ch->id);
	return;

}

/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	ctcm_pr_debug("ctcmpc enter: %s %s() %s \nGrpState:%s ChState:%s\n",
			dev->name,
			__FUNCTION__, ch->id,
			fsm_getstate_str(grp->fsm),
			fsm_getstate_str(ch->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary.start yside xid exchanges*/
		/* only receive one attn-busy at a time so must not  */
		/* change state each time			      */
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn	  */
		/* so must report failure instead of reverting	  */
		/* back to ready-for-xid passive state		  */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid	  */
		/* collisions so yside must have been triggered	  */
		/* by an ATTN that was not intended to start XID  */
		/* processing.
/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	ctcm_pr_debug("ctcmpc enter: %s %s() %s\nGrpState:%s ChState:%s\n",
			dev->name,
			__FUNCTION__, ch->id,
			fsm_getstate_str(grp->fsm),
			fsm_getstate_str(ch->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. Start yside xid exchanges */
		/* only receive one attn-busy at a time so must not    */
		/* change state each time                              */
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn      */
		/* so must report failure instead of reverting    */
		/* back to ready-for-xid passive state            */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid   */
		/* collisions so yside must have been triggered   */
		/* by an ATTN that was not intended to start XID  */
		/* processing. Revert back to ready-for-xid and   */
		/* wait for ATTN interrupt to signal xid start    */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for second
		   channel. Send yside xid for second channel. */
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* fall through */
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/* multiple attn-busy indicates too out-of-sync      */
		/* and they are certainly not being received as part */
		/* of valid mpc group negotiations..                 */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		printk(KERN_WARNING "ctcmpc: %s() Not all channels have"
			" been added to group\n", __FUNCTION__);

done:
	if (do_debug)
		ctcm_pr_debug("ctcmpc exit : %s()%s ch=0x%p id=%s\n",
				__FUNCTION__, dev->name, ch, ch->id);
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	ctcm_pr_debug("ctcmpc enter: %s %s() %s\nGrpState:%s ChState:%s\n",
			dev->name, __FUNCTION__, ch->id,
			fsm_getstate_str(grp->fsm),
			fsm_getstate_str(ch->fsm));

	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);

	return;
}
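/*
 * Illustrative note on ctcmpc_chx_resend(): in the XID states of
 * ctcmpc_ch_fsm below, CTC_EVENT_TIMER is routed to this action, so an
 * XID exchange that has not produced final status in time is simply
 * re-posted to the group statemachine.  A rough sketch of the retry
 * pattern; the fsm_addtimer() line is a hypothetical illustration of
 * where such a timer could be armed, not a quote of the actual arming
 * site:
 *
 *	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
 *	...
 *	on CTC_EVENT_TIMER the table below dispatches ctcmpc_chx_resend(),
 *	which re-drives the group FSM:
 *	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
 */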
/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ach = arg;
	struct net_device *dev = ach->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *wch = priv->channel[WRITE];
	struct channel *rch = priv->channel[READ];
	struct sk_buff *skb;
	struct th_sweep *header;
	int rc = 0;
	unsigned long saveflags = 0;

	if (do_debug)
		ctcm_pr_debug("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
				__FUNCTION__, smp_processor_id(), ach, ach->id);

	if (grp->in_sweep == 0)
		goto done;

	if (do_debug_data) {
		ctcm_pr_debug("ctcmpc: %s() 1: ToVTAM_th_seq= %08x\n",
				__FUNCTION__, wch->th_seq_num);
		ctcm_pr_debug("ctcmpc: %s() 1: FromVTAM_th_seq= %08x\n",
				__FUNCTION__, rch->th_seq_num);
	}

	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
		/* give the previous IO time to complete */
		fsm_addtimer(&wch->sweep_timer,
				200, CTC_EVENT_RSWEEP_TIMER, wch);
		goto done;
	}

	skb = skb_dequeue(&wch->sweep_queue);
	if (!skb)
		goto done;

	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
		grp->in_sweep = 0;
		ctcm_clear_busy_do(dev);
		dev_kfree_skb_any(skb);
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	} else {
		atomic_inc(&skb->users);
		skb_queue_tail(&wch->io_queue, skb);
	}

	/* send out the sweep */
	wch->ccw[4].count = skb->len;

	header = (struct th_sweep *)skb->data;
	switch (header->th.th_ch_flag) {
	case TH_SWEEP_REQ:
		grp->sweep_req_pend_num--;
		break;
	case TH_SWEEP_RESP:
		grp->sweep_rsp_pend_num--;
		break;
	}

	header->sw.th_last_seq = wch->th_seq_num;

	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);

	ctcm_pr_debug("ctcmpc: %s() sweep packet\n", __FUNCTION__);
	ctcmpc_dumpit((char *)header, TH_SWEEP_LENGTH);

	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
	fsm_newstate(wch->fsm, CTC_STATE_TX);

	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
	wch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(wch->cdev, &wch->ccw[3],
					(unsigned long) wch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);

	if ((grp->sweep_req_pend_num == 0) &&
			(grp->sweep_rsp_pend_num == 0)) {
		grp->in_sweep = 0;
		rch->th_seq_num = 0x00;
		wch->th_seq_num = 0x00;
		ctcm_clear_busy_do(dev);
	}

	if (do_debug_data) {
		ctcm_pr_debug("ctcmpc: %s()2: ToVTAM_th_seq= %08x\n",
				__FUNCTION__, wch->th_seq_num);
		ctcm_pr_debug("ctcmpc: %s()2: FromVTAM_th_seq= %08x\n",
				__FUNCTION__, rch->th_seq_num);
	}

	if (rc != 0)
		ctcm_ccw_check_rc(wch, rc, "send sweep");

done:
	if (do_debug)
		ctcm_pr_debug("ctcmpc exit: %s() %s\n", __FUNCTION__, ach->id);
	return;
}
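/*
 * Condensed, illustrative summary of the sweep handling in
 * ctcmpc_chx_send_sweep() above (no additional logic, just the two key
 * decisions): a sweep frame is only started once the write channel is
 * idle, otherwise the attempt is re-armed on sweep_timer; and once both
 * pending counters reach zero the TH sequence numbers are reset and the
 * device is unblocked:
 *
 *	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
 *		fsm_addtimer(&wch->sweep_timer, 200, CTC_EVENT_RSWEEP_TIMER, wch);
 *		return;
 *	}
 *	...
 *	if (grp->sweep_req_pend_num == 0 && grp->sweep_rsp_pend_num == 0) {
 *		grp->in_sweep = 0;
 *		rch->th_seq_num = 0x00;
 *		wch->th_seq_num = 0x00;
 *		ctcm_clear_busy_do(dev);
 *	}
 */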

/*
 * The ctcmpc statemachine for a channel.
 */

const fsm_node ctcmpc_ch_fsm[] = {
	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start },
	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },

	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },

	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },

	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
};

int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
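/*
 * How a template like ctcmpc_ch_fsm is consumed (illustrative sketch
 * only; the actual instantiation lives in the channel setup code
 * elsewhere in this driver).  Assuming the generic init_fsm() helper
 * from fsm.h and state/event counts as declared in ctcm_fsms.h (the
 * count names are assumed here), a channel FSM is built from the
 * template and then driven purely through fsm_event():
 *
 *	fsm_instance *fi;
 *
 *	fi = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 *		      CTC_MPC_NR_STATES, CTC_MPC_NR_EVENTS,
 *		      ctcmpc_ch_fsm, mpc_ch_fsm_len, GFP_KERNEL);
 *	fsm_newstate(fi, CTC_STATE_STOPPED);
 *	...
 *	fsm_event(fi, CTC_EVENT_START, ch);	  dispatches ctcm_chx_start()
 */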
/*
 * Actions for interface - statemachine.
 */

/**
 * Startup channels by sending CTC_EVENT_START to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;
	int direction;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_deltimer(&priv->restart_timer);
	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
	if (IS_MPC(priv))
		priv->mpcg->channels_terminating = 0;
	for (direction = READ; direction <= WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_START, ch);
	}
}

/**
 * Shutdown channels by sending CTC_EVENT_STOP to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	int direction;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
	for (direction = READ; direction <= WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
		ch->th_seq_num = 0x00;
		if (do_debug)
			ctcm_pr_debug("ctcm: %s() CH_th_seq= %08x\n",
					__FUNCTION__, ch->th_seq_num);
	}
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
}
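/*
 * Illustrative note: dev_action_start()/dev_action_stop() are not called
 * directly.  They are reached through the interface statemachine built
 * from dev_fsm below; the net_device open/close paths elsewhere in this
 * driver are expected to post the corresponding events, roughly:
 *
 *	fsm_event(priv->fsm, DEV_EVENT_START, dev);	  on open
 *	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);	  on close/shutdown
 */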
static void dev_action_restart(fsm_instance *fi, int event, void *arg)
{
	int restart_timer;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(TRACE, dev, "");

	if (IS_MPC(priv)) {
		ctcm_pr_info("ctcm: %s Restarting Device and "
				"MPC Group in 5 seconds\n",
				dev->name);
		restart_timer = CTCM_TIME_1_SEC;
	} else {
		ctcm_pr_info("%s: Restarting\n", dev->name);
		restart_timer = CTCM_TIME_5_SEC;
	}

	dev_action_stop(fi, event, arg);
	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);

	/* going back into start sequence too quickly can    */
	/* result in the other side becoming unreachable due */
	/* to sense reported when IO is aborted              */
	fsm_addtimer(&priv->restart_timer, restart_timer,
			DEV_EVENT_START, dev);
}
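/*
 * The deferred restart above exists because re-entering the start
 * sequence immediately after the stop sequence can leave the peer
 * unreachable (sense data is reported while the aborted I/O is still
 * settling).  Sketch of the defer-then-start pattern used here:
 *
 *	fsm_addtimer(&priv->restart_timer, restart_timer,
 *		     DEV_EVENT_START, dev);
 *	...
 *	when the timer expires the FSM receives DEV_EVENT_START and
 *	dev_action_start() runs exactly as on a normal start.
 */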
/**
 * Called from channel statemachine
 * when a channel is up and running.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT_RXTX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_RXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			ctcm_pr_info("%s: connected with remote side\n",
					dev->name);
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_TXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			ctcm_pr_info("%s: connected with remote side\n",
					dev->name);
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_TXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	}

	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXUP)
			mpc_channel_action(priv->channel[READ],
				READ, MPC_CHANNEL_ADD);
		else
			mpc_channel_action(priv->channel[WRITE],
				WRITE, MPC_CHANNEL_ADD);
	}
}

/**
 * Called from device statemachine
 * when a channel has been shutdown.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from struct net_device * upon call.
 */
static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RXTX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
		else
			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	}
	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXDOWN)
			mpc_channel_action(priv->channel[READ],
				READ, MPC_CHANNEL_REMOVE);
		else
			mpc_channel_action(priv->channel[WRITE],
				WRITE, MPC_CHANNEL_REMOVE);
	}
}
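/*
 * Illustrative trace of how the interface reaches DEV_STATE_RUNNING:
 * each channel statemachine reports its direction separately, and
 * dev_action_chup() only declares the device up once both directions
 * have come up (the order of the two events does not matter):
 *
 *	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);	  from the RX channel
 *	fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);	  from the TX channel
 *
 *	DEV_STATE_STARTWAIT_RXTX -> DEV_STATE_STARTWAIT_TX (or _RX)
 *				 -> DEV_STATE_RUNNING
 */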
const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,		DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_RUNNING,		DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RESTART,	dev_action_restart },
};

int dev_fsm_len = ARRAY_SIZE(dev_fsm);

/* --- This is the END my friend --- */