/*
 * drivers/s390/net/ctcm_fsms.c
 *
 * Copyright IBM Corp. 2001, 2007
 * Authors:	Fritz Elfert (felfert@millenux.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 *	MPC additions :
 *		Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"
#include "cu3088.h"

#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"

const char *dev_state_names[] = {
	[DEV_STATE_STOPPED]		= "Stopped",
	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
	[DEV_STATE_RUNNING]		= "Running",
};

const char *dev_event_names[] = {
	[DEV_EVENT_START]	= "Start",
	[DEV_EVENT_STOP]	= "Stop",
	[DEV_EVENT_RXUP]	= "RX up",
	[DEV_EVENT_TXUP]	= "TX up",
	[DEV_EVENT_RXDOWN]	= "RX down",
	[DEV_EVENT_TXDOWN]	= "TX down",
	[DEV_EVENT_RESTART]	= "Restart",
};

const char *ctc_ch_event_names[] = {
	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
	[CTC_EVENT_ATTN]	= "Status ATTN",
	[CTC_EVENT_BUSY]	= "Status BUSY",
	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
	[CTC_EVENT_IRQ]		= "IRQ normal",
	[CTC_EVENT_FINSTAT]	= "IRQ final",
	[CTC_EVENT_TIMER]	= "Timer",
	[CTC_EVENT_START]	= "Start",
	[CTC_EVENT_STOP]	= "Stop",
	/*
	 * additional MPC events
	 */
	[CTC_EVENT_SEND_XID]	= "XID Exchange",
	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};

const char *ctc_ch_state_names[] = {
	[CTC_STATE_IDLE]	= "Idle",
	[CTC_STATE_STOPPED]	= "Stopped",
	[CTC_STATE_STARTWAIT]	= "StartWait",
	[CTC_STATE_STARTRETRY]	= "StartRetry",
	[CTC_STATE_SETUPWAIT]	= "SetupWait",
	[CTC_STATE_RXINIT]	= "RX init",
	[CTC_STATE_TXINIT]	= "TX init",
	[CTC_STATE_RX]		= "RX",
	[CTC_STATE_TX]		= "TX",
	[CTC_STATE_RXIDLE]	= "RX idle",
	[CTC_STATE_TXIDLE]	= "TX idle",
	[CTC_STATE_RXERR]	= "RX error",
	[CTC_STATE_TXERR]	= "TX error",
	[CTC_STATE_TERM]	= "Terminating",
	[CTC_STATE_DTERM]	= "Restarting",
	[CTC_STATE_NOTOP]	= "Not operational",
	/*
	 * additional MPC states
	 */
	[CH_XID0_PENDING]	= "Pending XID0 Start",
	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
};

static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcm actions for channel statemachine -----
 *
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg);
static void chx_rx(fsm_instance *fi, int event, void *arg);
static void chx_rxidle(fsm_instance *fi, int event, void *arg);
static void chx_firstio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
 *
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
/* shared :
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);

/**
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * ch	:	The channel, the error belongs to.
 * Returns the error code (!= 0) to inspect.
 */
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s: %04x\n",
			CTCM_FUNTAIL, ch->id, msg, rc);
	switch (rc) {
	case -EBUSY:
		ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg);
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		ctcm_pr_emerg("%s (%s): Invalid device called for IO\n",
			ch->id, msg);
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n",
			ch->id, msg, rc);
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}

void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/**
 * NOP action for statemachines
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}
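/*
 * Locking note for the actions below (a summary of the existing
 * "not yet locked" remarks): when an action is invoked from the
 * ccw_device interrupt handler, the ccw device lock is already held by
 * the caller.  Only the paths entered via timer or user events
 * (CTC_EVENT_TIMER, CTC_EVENT_STOP, ...) take get_ccwdev_lock()
 * themselves around ccw_device_start()/ccw_device_halt(), which is why
 * several actions lock conditionally on the triggering event and why
 * sparse warns about the conditional locking.
 */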
/*
 * Actions for channel - statemachines.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	duration =
	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = current_kernel_time(); /* xtime */
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}

/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}

/**
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	if (len < 8) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d < 8\n",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d > %d\n",
			CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got block length %d != rx length %d\n",
			CTCM_FUNTAIL, dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	block_len -= 2;
	if (block_len > 0) {
		*((__u16 *)skb->data) = block_len;
		ctcm_unpack_skb(ch, skb);
	}
again:
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	int rc;
	struct channel *ch = arg;
	int fsmstate = fsm_getstate(fi);

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s(%s) : %02x",
		CTCM_FUNTAIL, ch->id, fsmstate);

	ch->sense_rc = 0;	/* reset unit check report control */
	if (fsmstate == CTC_STATE_TXIDLE)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): remote side issued READ?, init.\n",
			CTCM_FUNTAIL, ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}
	/*
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode, since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	__u16 buflen;
	int rc;

	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *)ch->trans_skb->data);
	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
			__func__, dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
				__func__, dev->name,
				buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}

/**
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		timeout = 1500;
		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
				__func__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is nondeterministic in
		 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
					(unsigned long)ch, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/**
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags;
	int rc;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
		CTCM_FUNTAIL, ch->id,
		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");

	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctcm_checkalloc_buffer(ch)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): %s trans_skb alloc delayed "
			"until first transfer",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
	}
	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
	}
}
/**
 * Shutdown a channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags = 0;
	int rc;
	int oldstate;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is nondeterministic in
		 * static view. => ignore sparse warnings here. */
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);

	if (event == CTC_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
		/* see remark above about conditional locking */

	if (rc != 0 && rc != -EBUSY) {
		fsm_deltimer(&ch->timer);
		if (event != CTC_EVENT_STOP) {
			fsm_newstate(fi, oldstate);
			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
		}
	}
}

/**
 * Cleanup helper for chx_fail and chx_stopped:
 * cleanup the channel's queues and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * state	The next state (depending on caller).
 * ch		The channel to operate on.
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
			"%s(%s): %s[%d]\n",
			CTCM_FUNTAIL, dev->name, ch->id, state);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * A channel has successfully been halted.
 * Cleanup its queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}

/**
 * A stop command from the device statemachine arrived and we are in
 * the not-operational state. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}

/**
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Cleanup queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}
/**
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that remote side isn't set up. In this case
	 * simply retry after some 10 secs...
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s(%s) : %s error during %s channel setup state=%s\n",
		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s[%d] of %s\n",
			CTCM_FUNTAIL, ch->id, event, dev->name);

	fsm_deltimer(&ch->timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because it is nondeterministic in static view.
		 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}

/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header).
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else
		ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name);
}
/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s(%s): RX %s busy, init. fail",
			CTCM_FUNTAIL, dev->name, ch->id);
	fsm_newstate(fi, CTC_STATE_RXERR);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}

/**
 * Handle RX Unit check remote reset (remote disconnected).
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s: %s: remote disconnect - re-init ...",
				CTCM_FUNTAIL, dev->name);
	fsm_deltimer(&ch->timer);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, (unsigned long)ch);
	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
}

/**
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name);
	}
}
/**
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct sk_buff *skb;

	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: retries exceeded",
					CTCM_FUNTAIL, ch->id);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready.
		   use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s : %s: retry %d",
				CTCM_FUNTAIL, ch->id, ch->retry);
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
					"%s: %s: IDAL alloc failed",
						CTCM_FUNTAIL, ch->id);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
			/* Such conditional locking is a known problem for
			 * sparse because it is nondeterministic in static
			 * view. Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
						(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}

/**
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	int rd = CHANNEL_DIRECTION(ch->flags);

	fsm_deltimer(&ch->timer);
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s: %s: %s unrecoverable channel error",
			CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");

	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}
	if (rd == READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}
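/*
 * Each fsm_node below maps a (state, event) pair to the action to run
 * for that transition; ch_fsm_len is provided so that the table size
 * can be passed along with the table when the per-channel fsm instance
 * is created (see the channel setup code in ctcm_main.c).  Events with
 * no entry for the current state are left to the fsm core's default
 * handling.
 */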
/*
 * The ctcm statemachine for a channel.
 */
const fsm_node ch_fsm[] = {
	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start },
	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },

	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start },

	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx },

	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
};

int ch_fsm_len = ARRAY_SIZE(ch_fsm);

/*
 * MPC actions for the mpc channel statemachine.
 * Handling of the MPC protocol requires an extra
 * statemachine and actions, which are prefixed ctcmpc_.
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
 * which are expanded by some elements.
 */

/*
 * Actions for mpc channel statemachine.
 */
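/*
 * MPC framing used by the actions below: outbound blocks carry a TH
 * header (TH_HEADER_LENGTH bytes) with a per-channel sequence number
 * (ch->th_seq_num), and each packet gathered from the collect queue
 * starts with a PDU header whose pdu_flag encodes the payload type and
 * marks the last PDU of a block (PDU_LAST).  Sweep frames
 * (TH_SWEEP_REQ/TH_SWEEP_RESP) resynchronize the sequence numbers of
 * the two peers, see ctcmpc_chx_send_sweep().
 */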
/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb;
	int first = 1;
	int i;
	__u32 data_space;
	unsigned long duration;
	struct sk_buff *peekskb;
	int rc;
	struct th_header *header;
	struct pdu *p_header;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
			__func__, dev->name, smp_processor_id());

	duration =
		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch)) {
		spin_unlock(&ch->collect_lock);
		goto done;
	}
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
	i = 0;
	p_header = NULL;
	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;

	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
		       " data_space:%04x\n",
		       __func__, data_space);

	while ((skb = skb_dequeue(&ch->collect_queue))) {
		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
		p_header = (struct pdu *)
			(skb_tail_pointer(ch->trans_skb) - skb->len);
		p_header->pdu_flag = 0x00;
		if (skb->protocol == ntohs(ETH_P_SNAP))
			p_header->pdu_flag |= 0x60;
		else
			p_header->pdu_flag |= 0x20;

		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
				__func__, ch->trans_skb->len);
		CTCM_PR_DBGDATA("%s: pdu header and data for up"
				" to 32 bytes sent to vtam\n", __func__);
		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));

		ch->collect_len -= skb->len;
		data_space -= skb->len;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		if (!peekskb || peekskb->len > data_space)
			break;	/* queue drained or next skb won't fit */
		i++;
	}
	/* p_header points to the last one we handled */
	if (p_header)
		p_header->pdu_flag |= PDU_LAST;	/* Say it's the last one */
	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
	if (!header) {
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}
	header->th_ch_flag = TH_HAS_PDU;	/* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
					__func__, ch->th_seq_num);

	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
		TH_HEADER_LENGTH);	/* put the TH on the packet */

	kfree(header);

	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
		       __func__, ch->trans_skb->len);
	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
			"data to vtam from collect_q\n", __func__);
	CTCM_D3_DUMP((char *)ch->trans_skb->data,
				min_t(int, ch->trans_skb->len, 50));

	spin_unlock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[1]);
	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
			"%s: %s: IDAL alloc failed",
				CTCM_FUNTAIL, ch->id);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		return;
	}
	ch->ccw[1].count = ch->trans_skb->len;
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	ch->prof.doios_multi++;
	if (rc != 0) {
		priv->stats.tx_dropped += i;
		priv->stats.tx_errors += i;
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "chained TX");
	}
done:
	ctcm_clear_busy(dev);
	return;
}
/**
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb = ch->trans_skb;
	struct sk_buff *new_skb;
	unsigned long saveflags = 0;	/* avoids compiler warning */
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;

	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
			CTCM_FUNTAIL, dev->name, smp_processor_id(),
				ch->id, ch->max_bufsize, len);
	fsm_deltimer(&ch->timer);

	if (skb == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): TRANS_SKB = NULL",
				CTCM_FUNTAIL, dev->name);
		goto again;
	}

	if (len < TH_HEADER_LENGTH) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): packet length %d too short",
				CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
	} else {
		/* must have valid th header or game over */
		__u32 block_len = len;
		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);

		if (new_skb == NULL) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): skb allocation failed",
						CTCM_FUNTAIL, dev->name);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto again;
		}
		switch (fsm_getstate(grp->fsm)) {
		case MPCG_STATE_RESET:
		case MPCG_STATE_INOP:
			dev_kfree_skb_any(new_skb);
			break;
		case MPCG_STATE_FLOWC:
		case MPCG_STATE_READY:
			memcpy(skb_put(new_skb, block_len),
					       skb->data, block_len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_schedule(&ch->ch_tasklet);
			break;
		default:
			memcpy(skb_put(new_skb, len), skb->data, len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_hi_schedule(&ch->ch_tasklet);
			break;
		}
	}

again:
	switch (fsm_getstate(grp->fsm)) {
	int rc, dolock;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			break;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		dolock = !in_irq();
		if (dolock)
			spin_lock_irqsave(
				get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (dolock) /* see remark about conditional locking */
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0)
			ctcm_ccw_check_rc(ch, rc, "normal RX");
	default:
		break;
	}

	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
			__func__, dev->name, ch, ch->id);

}
/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *gptr = priv->mpcg;

	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
				__func__, ch->id, ch);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
			fsm_getstate(gptr->fsm), ch->protocol);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");

	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		goto done;

	switch (fsm_getstate(fi)) {
	case CTC_STATE_STARTRETRY:
	case CTC_STATE_SETUPWAIT:
		if (CHANNEL_DIRECTION(ch->flags) == READ) {
			ctcmpc_chx_rxidle(fi, event, arg);
		} else {
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		goto done;
	default:
		break;
	}

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);

done:
	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
				__func__, ch->id, ch);
	return;
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	fsm_deltimer(&ch->timer);
	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
			__func__, ch->id, dev->name, smp_processor_id(),
				fsm_getstate(fi), fsm_getstate(grp->fsm));

	fsm_newstate(fi, CTC_STATE_RXIDLE);
	/* XID processing complete */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			goto done;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
		if (event == CTC_EVENT_START)
			/* see remark about conditional locking */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_START)
			spin_unlock_irqrestore(
					get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
			goto done;
		}
		break;
	default:
		break;
	}

	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
	return;
}
/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
		__func__, dev->name, ch->id, ch, smp_processor_id(),
			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* ok, start yside xid exchanges */
		if (!ch->in_mpcgroup)
			break;
		if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
			fsm_deltimer(&grp->timer);
			fsm_addtimer(&grp->timer,
				MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);

		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* attn rcvd before xid0 processed via bh */
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
		/* attn rcvd before xid0 processed on ch
		   but mid-xid0 processing for group */
		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
		switch (fsm_getstate(ch->fsm)) {
		case CH_XID7_PENDING:
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
			break;
		case CH_XID7_PENDING2:
			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
			break;
		}
		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
		break;
	}

	return;
}

/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
		__func__, dev->name, ch->id,
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. Start yside xid exchanges. */
		/* only receive one attn-busy at a time so must not	 */
		/* change state each time				 */
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn	 */
		/* so must report failure instead of reverting	 */
		/* back to ready-for-xid passive state		 */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid	 */
		/* collisions so yside must have been triggered	 */
		/* by an ATTN that was not intended to start XID */
		/* processing. Revert back to ready-for-xid and	 */
		/* wait for ATTN interrupt to signal xid start	 */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for second
		   channel. Send yside xid for second channel. */
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/* multiple attn-busy indicates we are too far out of */
		/* sync and they are certainly not being received as  */
		/* part of valid mpc group negotiations.	       */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
			     MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): channel %s not added to group",
				CTCM_FUNTAIL, dev->name, ch->id);

done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->priv;
	struct mpc_group *grp = priv->mpcg;

	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	return;
}
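/*
 * Sweep handling: while grp->in_sweep is set, chained transmission from
 * the collect queue is held off (see ctcmpc_chx_txdone()) and queued
 * sweep requests/responses are sent from wch->sweep_queue by the action
 * below.  Once both sweep_req_pend_num and sweep_rsp_pend_num reach
 * zero, the TH sequence numbers of both channels are reset and normal
 * transmission resumes.
 */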

/*
 * The ctcmpc statemachine for a channel.
 */
const fsm_node ctcmpc_ch_fsm[] = {
	{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
	{ CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },

	{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },
	{ CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },

	{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },

	{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio },
	{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
	{ CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop },
	{ CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },

	{ CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn },
	{ CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop },
	{ CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy },
	{ CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend },
	{ CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },

	{ CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
	{ CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn },
	{ CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop },
	{ CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
	{ CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
	{ CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend },
	{ CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },

	{ CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
	{ CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn },
	{ CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop },
	{ CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
	{ CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
	{ CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend },
	{ CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },

	{ CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
	{ CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn },
	{ CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop },
	{ CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
	{ CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
	{ CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend },
	{ CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },

	{ CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
	{ CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn },
	{ CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop },
	{ CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
	{ CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
	{ CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend },
	{ CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },

	{ CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
	{ CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn },
	{ CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop },
	{ CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },
	{ CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal },
	{ CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend },
	{ CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },

	{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
	{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx },

	{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },

	{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
	{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },

	{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },
	{ CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },

	{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },

	{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone },
	{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail },
	{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail },
	{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep },
	{ CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail },

	{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};

int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
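
/*
 * Sketch of how a table like ctcmpc_ch_fsm is typically consumed
 * (assumption: the init_fsm() helper from fsm.h and the MPC state/event
 * counts declared in ctcm_fsms.h; the authoritative call lives in the
 * channel setup code, not in this file):
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names,
 *			ctc_ch_event_names, CTC_MPC_NR_STATES,
 *			CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm,
 *			mpc_ch_fsm_len, GFP_KERNEL);
 *	fsm_newstate(ch->fsm, CTC_STATE_IDLE);
 *
 * Events raised with fsm_event(ch->fsm, event, ch) are then looked up in
 * this table against the channel's current state.
 */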

/*
 * Actions for interface - statemachine.
 */

/**
 * Start up the channels by sending CTC_EVENT_START to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;
	int direction;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_deltimer(&priv->restart_timer);
	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
	if (IS_MPC(priv))
		priv->mpcg->channels_terminating = 0;
	for (direction = READ; direction <= WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_START, ch);
	}
}
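
/*
 * Illustrative only (assumption about the callers, which live in
 * ctcm_main.c rather than in this file): opening or closing the
 * interface ends up driving this statemachine, roughly as
 *
 *	fsm_event(priv->fsm, DEV_EVENT_START, dev);	on open
 *	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);	on close
 *
 * dev_fsm[] at the end of this file maps those events, per device
 * state, onto the dev_action_* handlers that follow.
 */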

/**
 * Shut down the channels by sending CTC_EVENT_STOP to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	int direction;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
	for (direction = READ; direction <= WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
		ch->th_seq_num = 0x00;
		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
				__func__, ch->th_seq_num);
	}
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
}

static void dev_action_restart(fsm_instance *fi, int event, void *arg)
{
	int restart_timer;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(TRACE, dev, "");

	if (IS_MPC(priv)) {
		ctcm_pr_info("ctcm: %s Restarting Device and "
					"MPC Group in 5 seconds\n",
					dev->name);
		restart_timer = CTCM_TIME_1_SEC;
	} else {
		ctcm_pr_info("%s: Restarting\n", dev->name);
		restart_timer = CTCM_TIME_5_SEC;
	}

	dev_action_stop(fi, event, arg);
	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);

	/* Going back into the start sequence too quickly can  */
	/* result in the other side becoming unreachable, due  */
	/* to sense reported when the IO is aborted.           */
	fsm_addtimer(&priv->restart_timer, restart_timer,
			DEV_EVENT_START, dev);
}

/**
 * Called from channel statemachine
 * when a channel is up and running.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;
	int dev_stat = fsm_getstate(fi);

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
		"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
		dev->name, dev->priv, dev_stat, event);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT_RXTX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_RXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			ctcm_pr_info("%s: connected with remote side\n",
				    dev->name);
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_TXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			ctcm_pr_info("%s: connected with remote side\n",
				    dev->name);
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_TXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	}

	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXUP)
			mpc_channel_action(priv->channel[READ],
				READ, MPC_CHANNEL_ADD);
		else
			mpc_channel_action(priv->channel[WRITE],
				WRITE, MPC_CHANNEL_ADD);
	}
}
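
/*
 * Worked example (illustrative trace, assuming the channel statemachine
 * reports readiness with DEV_EVENT_TXUP/DEV_EVENT_RXUP as elsewhere in
 * this driver): starting from DEV_STATE_STARTWAIT_RXTX,
 *
 *	fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);	-> STARTWAIT_RX
 *	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);	-> RUNNING
 *
 * i.e. dev_action_chup() only declares the interface RUNNING, prints the
 * "connected" message and clears the busy flag once both directions are
 * up.
 */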

/**
 * Called from channel statemachine
 * when a channel has been shut down.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RXTX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
		else
			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	}
	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXDOWN)
			mpc_channel_action(priv->channel[READ],
				READ, MPC_CHANNEL_REMOVE);
		else
			mpc_channel_action(priv->channel[WRITE],
				WRITE, MPC_CHANNEL_REMOVE);
	}
}
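
/*
 * Illustrative only (assumption about the channel-side callers): the
 * up/down events consumed by dev_action_chup()/dev_action_chdown() are
 * posted by the channel statemachine actions, roughly as
 *
 *	fsm_event(priv->fsm,
 *		  (CHANNEL_DIRECTION(ch->flags) == READ) ?
 *			DEV_EVENT_RXDOWN : DEV_EVENT_TXDOWN,
 *		  ch->netdev);
 *
 * so the table below only ever sees per-direction transitions.
 */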

const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart },
	{ DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop },
	{ DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown },
	{ DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown },
	{ DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop },
	{ DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop },
	{ DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart },
};

int dev_fsm_len = ARRAY_SIZE(dev_fsm);

/* --- This is the END my friend --- */