/*
 * drivers/s390/net/ctcm_fsms.c
 *
 * Copyright IBM Corp. 2001, 2007
 * Authors:	Fritz Elfert (felfert@millenux.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 *	MPC additions :
 *		Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"

#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"

const char *dev_state_names[] = {
	[DEV_STATE_STOPPED]		= "Stopped",
	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
	[DEV_STATE_RUNNING]		= "Running",
};

const char *dev_event_names[] = {
	[DEV_EVENT_START]	= "Start",
	[DEV_EVENT_STOP]	= "Stop",
	[DEV_EVENT_RXUP]	= "RX up",
	[DEV_EVENT_TXUP]	= "TX up",
	[DEV_EVENT_RXDOWN]	= "RX down",
	[DEV_EVENT_TXDOWN]	= "TX down",
	[DEV_EVENT_RESTART]	= "Restart",
};

const char *ctc_ch_event_names[] = {
	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
	[CTC_EVENT_ATTN]	= "Status ATTN",
	[CTC_EVENT_BUSY]	= "Status BUSY",
	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
	[CTC_EVENT_IRQ]		= "IRQ normal",
	[CTC_EVENT_FINSTAT]	= "IRQ final",
	[CTC_EVENT_TIMER]	= "Timer",
	[CTC_EVENT_START]	= "Start",
	[CTC_EVENT_STOP]	= "Stop",
	/*
	 * additional MPC events
	 */
	[CTC_EVENT_SEND_XID]	= "XID Exchange",
	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};

const char *ctc_ch_state_names[] = {
	[CTC_STATE_IDLE]	= "Idle",
	[CTC_STATE_STOPPED]	= "Stopped",
	[CTC_STATE_STARTWAIT]	= "StartWait",
	[CTC_STATE_STARTRETRY]	= "StartRetry",
	[CTC_STATE_SETUPWAIT]	= "SetupWait",
	[CTC_STATE_RXINIT]	= "RX init",
	[CTC_STATE_TXINIT]	= "TX init",
	[CTC_STATE_RX]		= "RX",
	[CTC_STATE_TX]		= "TX",
	[CTC_STATE_RXIDLE]	= "RX idle",
	[CTC_STATE_TXIDLE]	= "TX idle",
	[CTC_STATE_RXERR]	= "RX error",
	[CTC_STATE_TXERR]	= "TX error",
	[CTC_STATE_TERM]	= "Terminating",
	[CTC_STATE_DTERM]	= "Restarting",
	[CTC_STATE_NOTOP]	= "Not operational",
	/*
	 * additional MPC states
	 */
	[CH_XID0_PENDING]	= "Pending XID0 Start",
	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
};

static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcm actions for channel statemachine -----
 *
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg);
static void chx_rx(fsm_instance *fi, int event, void *arg);
static void chx_rxidle(fsm_instance *fi, int event, void *arg);
static void chx_firstio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
 *
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
/* shared :
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);

/**
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * ch	:	The channel the error belongs to.
 * Returns the error code (!= 0) to inspect.
 */
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s: %04x\n",
			CTCM_FUNTAIL, ch->id, msg, rc);
	switch (rc) {
	case -EBUSY:
		pr_info("%s: The communication peer is busy\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		pr_err("%s: The specified target device is not valid\n",
		       ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		pr_err("An I/O operation resulted in error %04x\n",
		       rc);
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}

void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);

	while ((skb = skb_dequeue(q))) {
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/**
 * NOP action for statemachines
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions for channel - statemachines.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	duration =
		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
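		/*
		 * Chain all skbs queued on collect_queue into trans_skb,
		 * which was just prefixed with the 2-byte block length,
		 * and send them below as a single chained write.
		 */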
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = current_kernel_time(); /* xtime */
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}

/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}

/**
 * Got normal data, check for sanity, queue it up, allocate a new buffer,
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	if (len < 8) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d < 8\n",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d > %d\n",
			CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got block length %d != rx length %d\n",
			CTCM_FUNTAIL, dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (block_len > 2) {
		*((__u16 *)skb->data) = block_len - 2;
		ctcm_unpack_skb(ch, skb);
	}
again:
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	int rc;
	struct channel *ch = arg;
	int fsmstate = fsm_getstate(fi);

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s(%s) : %02x",
		CTCM_FUNTAIL, ch->id, fsmstate);

	ch->sense_rc = 0;	/* reset unit check report control */
	if (fsmstate == CTC_STATE_TXIDLE)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): remote side issued READ?, init.\n",
			CTCM_FUNTAIL, ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->ml_priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}
	/*
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode, since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 buflen;
	int rc;

	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *)ch->trans_skb->data);
	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
			__func__, dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
				__func__, dev->name,
				buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}

/**
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		timeout = 1500;
		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
				__func__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is non-deterministic in
		 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
					(unsigned long)ch, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/**
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags;
	int rc;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
		CTCM_FUNTAIL, ch->id,
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
"RX" : "TX"); 605 606 if (ch->trans_skb != NULL) { 607 clear_normalized_cda(&ch->ccw[1]); 608 dev_kfree_skb(ch->trans_skb); 609 ch->trans_skb = NULL; 610 } 611 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { 612 ch->ccw[1].cmd_code = CCW_CMD_READ; 613 ch->ccw[1].flags = CCW_FLAG_SLI; 614 ch->ccw[1].count = 0; 615 } else { 616 ch->ccw[1].cmd_code = CCW_CMD_WRITE; 617 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 618 ch->ccw[1].count = 0; 619 } 620 if (ctcm_checkalloc_buffer(ch)) { 621 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, 622 "%s(%s): %s trans_skb alloc delayed " 623 "until first transfer", 624 CTCM_FUNTAIL, ch->id, 625 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 626 "RX" : "TX"); 627 } 628 ch->ccw[0].cmd_code = CCW_CMD_PREPARE; 629 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 630 ch->ccw[0].count = 0; 631 ch->ccw[0].cda = 0; 632 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */ 633 ch->ccw[2].flags = CCW_FLAG_SLI; 634 ch->ccw[2].count = 0; 635 ch->ccw[2].cda = 0; 636 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3); 637 ch->ccw[4].cda = 0; 638 ch->ccw[4].flags &= ~CCW_FLAG_IDA; 639 640 fsm_newstate(fi, CTC_STATE_STARTWAIT); 641 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); 642 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); 643 rc = ccw_device_halt(ch->cdev, (unsigned long)ch); 644 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); 645 if (rc != 0) { 646 if (rc != -EBUSY) 647 fsm_deltimer(&ch->timer); 648 ctcm_ccw_check_rc(ch, rc, "initial HaltIO"); 649 } 650 } 651 652 /** 653 * Shutdown a channel. 654 * 655 * fi An instance of a channel statemachine. 656 * event The event, just happened. 657 * arg Generic pointer, casted from channel * upon call. 658 */ 659 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) 660 { 661 struct channel *ch = arg; 662 unsigned long saveflags = 0; 663 int rc; 664 int oldstate; 665 666 fsm_deltimer(&ch->timer); 667 if (IS_MPC(ch)) 668 fsm_deltimer(&ch->sweep_timer); 669 670 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); 671 672 if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ 673 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); 674 /* Such conditional locking is undeterministic in 675 * static view. => ignore sparse warnings here. */ 676 oldstate = fsm_getstate(fi); 677 fsm_newstate(fi, CTC_STATE_TERM); 678 rc = ccw_device_halt(ch->cdev, (unsigned long)ch); 679 680 if (event == CTC_EVENT_STOP) 681 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); 682 /* see remark above about conditional locking */ 683 684 if (rc != 0 && rc != -EBUSY) { 685 fsm_deltimer(&ch->timer); 686 if (event != CTC_EVENT_STOP) { 687 fsm_newstate(fi, oldstate); 688 ctcm_ccw_check_rc(ch, rc, (char *)__func__); 689 } 690 } 691 } 692 693 /** 694 * Cleanup helper for chx_fail and chx_stopped 695 * cleanup channels queue and notify interface statemachine. 696 * 697 * fi An instance of a channel statemachine. 698 * state The next state (depending on caller). 699 * ch The channel to operate on. 
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
			"%s(%s): %s[%d]\n",
			CTCM_FUNTAIL, dev->name, ch->id, state);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * A channel has successfully been halted.
 * Clean up its queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}

/**
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}

/**
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Clean up queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}

/**
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that remote side isn't setup. In this case
	 * simply retry after some 10 secs...
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) &&
		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s(%s) : %s error during %s channel setup state=%s\n",
		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s[%d] of %s\n",
			CTCM_FUNTAIL, ch->id, event, dev->name);

	fsm_deltimer(&ch->timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because it's non-deterministic in static view.
		 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}

/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s(%s): RX %s busy, init. fail",
			CTCM_FUNTAIL, dev->name, ch->id);
	fsm_newstate(fi, CTC_STATE_RXERR);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}

/**
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s: %s: remote disconnect - re-init ...",
			CTCM_FUNTAIL, dev->name);
	fsm_deltimer(&ch->timer);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[CTCM_WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, (unsigned long)ch);
	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
}

/**
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/**
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;

	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: retries exceeded",
				CTCM_FUNTAIL, ch->id);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready.
		   use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
		"%s : %s: retry %d",
			CTCM_FUNTAIL, ch->id, ch->retry);
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: IDAL alloc failed",
				CTCM_FUNTAIL, ch->id);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER)	/* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
			/* Such conditional locking is a known problem for
			 * sparse because it's non-deterministic in static
			 * view. Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
						(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}

/**
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int rd = CHANNEL_DIRECTION(ch->flags);

	fsm_deltimer(&ch->timer);
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s: %s: %s unrecoverable channel error",
			CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");

	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}
	if (rd == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * The ctcm statemachine for a channel.
 */
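/*
 * Each fsm_node maps a (state, event) pair to the action routine that
 * the generic fsm code dispatches when that event is posted while the
 * channel is in that state; pairs without an entry here trigger no
 * action.
 */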
const fsm_node ch_fsm[] = {
	{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
	{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },

	{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },

	{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
	{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },

	{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
	{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};

int ch_fsm_len = ARRAY_SIZE(ch_fsm);

/*
 * MPC actions for mpc channel statemachine
 * handling of MPC protocol requires extra
 * statemachine and actions which are prefixed ctcmpc_ .
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
 * which are expanded by some elements.
 */

/*
 * Actions for mpc channel statemachine.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb;
	int first = 1;
	int i;
	__u32 data_space;
	unsigned long duration;
	struct sk_buff *peekskb;
	int rc;
	struct th_header *header;
	struct pdu *p_header;
	struct timespec done_stamp = current_kernel_time(); /* xtime */

	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
			__func__, dev->name, smp_processor_id());

	duration =
		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch)) {
		spin_unlock(&ch->collect_lock);
		goto done;
	}
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
	i = 0;
	p_header = NULL;
	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;

	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
			" data_space:%04x\n",
			__func__, data_space);

	while ((skb = skb_dequeue(&ch->collect_queue))) {
		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
		p_header = (struct pdu *)
			(skb_tail_pointer(ch->trans_skb) - skb->len);
		p_header->pdu_flag = 0x00;
		if (skb->protocol == ntohs(ETH_P_SNAP))
			p_header->pdu_flag |= 0x60;
		else
			p_header->pdu_flag |= 0x20;

		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
				__func__, ch->trans_skb->len);
		CTCM_PR_DBGDATA("%s: pdu header and data for up"
				" to 32 bytes sent to vtam\n", __func__);
		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));

		ch->collect_len -= skb->len;
		data_space -= skb->len;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		if (peekskb->len > data_space)
			break;
		i++;
	}
	/* p_header points to the last one we handled */
	if (p_header)
		p_header->pdu_flag |= PDU_LAST;	/* Say it's the last one */
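	/*
	 * Build and prepend the MPC transport header (TH) carrying the
	 * next outbound sequence number before the chained write is
	 * started below.
	 */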
	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
	if (!header) {
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}
	header->th_ch_flag = TH_HAS_PDU;	/* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n",
					__func__, ch->th_seq_num);

	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
		TH_HEADER_LENGTH);	/* put the TH on the packet */

	kfree(header);

	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
			__func__, ch->trans_skb->len);
	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
			"data to vtam from collect_q\n", __func__);
	CTCM_D3_DUMP((char *)ch->trans_skb->data,
			min_t(int, ch->trans_skb->len, 50));

	spin_unlock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[1]);

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(unsigned long)ch->ccw[1].cda,
			ch->trans_skb->data);
	ch->ccw[1].count = ch->max_bufsize;

	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
			"%s: %s: IDAL alloc failed",
			CTCM_FUNTAIL, ch->id);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		return;
	}

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(unsigned long)ch->ccw[1].cda,
			ch->trans_skb->data);

	ch->ccw[1].count = ch->trans_skb->len;
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
					(unsigned long)ch, 0xff, 0);
	ch->prof.doios_multi++;
	if (rc != 0) {
		priv->stats.tx_dropped += i;
		priv->stats.tx_errors += i;
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "chained TX");
	}
done:
	ctcm_clear_busy(dev);
	return;
}

/**
 * Got normal data, check for sanity, queue it up, allocate a new buffer,
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb = ch->trans_skb;
	struct sk_buff *new_skb;
	unsigned long saveflags = 0;	/* avoids compiler warning */
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;

	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
			CTCM_FUNTAIL, dev->name, smp_processor_id(),
			ch->id, ch->max_bufsize, len);
	fsm_deltimer(&ch->timer);

	if (skb == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): TRANS_SKB = NULL",
			CTCM_FUNTAIL, dev->name);
		goto again;
	}

	if (len < TH_HEADER_LENGTH) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): packet length %d too short",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
	} else {
		/* must have valid th header or game over */
		__u32 block_len = len;
		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);

		if (new_skb == NULL) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): skb allocation failed",
				CTCM_FUNTAIL, dev->name);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto again;
		}
		switch (fsm_getstate(grp->fsm)) {
		case MPCG_STATE_RESET:
		case MPCG_STATE_INOP:
			dev_kfree_skb_any(new_skb);
			break;
		case MPCG_STATE_FLOWC:
		case MPCG_STATE_READY:
			memcpy(skb_put(new_skb, block_len),
					skb->data, block_len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_schedule(&ch->ch_tasklet);
			break;
		default:
			memcpy(skb_put(new_skb, len), skb->data, len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_hi_schedule(&ch->ch_tasklet);
			break;
		}
	}

again:
	switch (fsm_getstate(grp->fsm)) {
	int rc, dolock;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			break;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		dolock = !in_irq();
		if (dolock)
			spin_lock_irqsave(
				get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (dolock) /* see remark about conditional locking */
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0)
			ctcm_ccw_check_rc(ch, rc, "normal RX");
	default:
		break;
	}

	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
			__func__, dev->name, ch, ch->id);

}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *gptr = priv->mpcg;

	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
			fsm_getstate(gptr->fsm), ch->protocol);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");

	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		goto done;

	switch (fsm_getstate(fi)) {
	case CTC_STATE_STARTRETRY:
	case CTC_STATE_SETUPWAIT:
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			ctcmpc_chx_rxidle(fi, event, arg);
		} else {
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		goto done;
	default:
		break;
	}

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);

done:
	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);
	return;
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	fsm_deltimer(&ch->timer);
	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
			__func__, ch->id, dev->name, smp_processor_id(),
			fsm_getstate(fi), fsm_getstate(grp->fsm));

	fsm_newstate(fi, CTC_STATE_RXIDLE);
	/* XID processing complete */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			goto done;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
		if (event == CTC_EVENT_START)
			/* see remark about conditional locking */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
						(unsigned long)ch, 0xff, 0);
		if (event == CTC_EVENT_START)
			spin_unlock_irqrestore(
					get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
			goto done;
		}
		break;
	default:
		break;
	}

	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", 1611 __func__, dev->name, ch->id, ch, smp_processor_id(), 1612 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); 1613 1614 switch (fsm_getstate(grp->fsm)) { 1615 case MPCG_STATE_XID2INITW: 1616 /* ok..start yside xid exchanges */ 1617 if (!ch->in_mpcgroup) 1618 break; 1619 if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) { 1620 fsm_deltimer(&grp->timer); 1621 fsm_addtimer(&grp->timer, 1622 MPC_XID_TIMEOUT_VALUE, 1623 MPCG_EVENT_TIMER, dev); 1624 fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); 1625 1626 } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) 1627 /* attn rcvd before xid0 processed via bh */ 1628 fsm_newstate(ch->fsm, CH_XID7_PENDING1); 1629 break; 1630 case MPCG_STATE_XID2INITX: 1631 case MPCG_STATE_XID0IOWAIT: 1632 case MPCG_STATE_XID0IOWAIX: 1633 /* attn rcvd before xid0 processed on ch 1634 but mid-xid0 processing for group */ 1635 if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) 1636 fsm_newstate(ch->fsm, CH_XID7_PENDING1); 1637 break; 1638 case MPCG_STATE_XID7INITW: 1639 case MPCG_STATE_XID7INITX: 1640 case MPCG_STATE_XID7INITI: 1641 case MPCG_STATE_XID7INITZ: 1642 switch (fsm_getstate(ch->fsm)) { 1643 case CH_XID7_PENDING: 1644 fsm_newstate(ch->fsm, CH_XID7_PENDING1); 1645 break; 1646 case CH_XID7_PENDING2: 1647 fsm_newstate(ch->fsm, CH_XID7_PENDING3); 1648 break; 1649 } 1650 fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev); 1651 break; 1652 } 1653 1654 return; 1655 } 1656 1657 /* 1658 * ctcmpc channel FSM action 1659 * called from one point in ctcmpc_ch_fsm 1660 * ctcmpc only 1661 */ 1662 static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) 1663 { 1664 struct channel *ch = arg; 1665 struct net_device *dev = ch->netdev; 1666 struct ctcm_priv *priv = dev->ml_priv; 1667 struct mpc_group *grp = priv->mpcg; 1668 1669 CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", 1670 __func__, dev->name, ch->id, 1671 fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); 1672 1673 fsm_deltimer(&ch->timer); 1674 1675 switch (fsm_getstate(grp->fsm)) { 1676 case MPCG_STATE_XID0IOWAIT: 1677 /* vtam wants to be primary.start yside xid exchanges*/ 1678 /* only receive one attn-busy at a time so must not */ 1679 /* change state each time */ 1680 grp->changed_side = 1; 1681 fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); 1682 break; 1683 case MPCG_STATE_XID2INITW: 1684 if (grp->changed_side == 1) { 1685 grp->changed_side = 2; 1686 break; 1687 } 1688 /* process began via call to establish_conn */ 1689 /* so must report failure instead of reverting */ 1690 /* back to ready-for-xid passive state */ 1691 if (grp->estconnfunc) 1692 goto done; 1693 /* this attnbusy is NOT the result of xside xid */ 1694 /* collisions so yside must have been triggered */ 1695 /* by an ATTN that was not intended to start XID */ 1696 /* processing. Revert back to ready-for-xid and */ 1697 /* wait for ATTN interrupt to signal xid start */ 1698 if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) { 1699 fsm_newstate(ch->fsm, CH_XID0_PENDING) ; 1700 fsm_deltimer(&grp->timer); 1701 goto done; 1702 } 1703 fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); 1704 goto done; 1705 case MPCG_STATE_XID2INITX: 1706 /* XID2 was received before ATTN Busy for second 1707 channel.Send yside xid for second channel. 
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* fall through */
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/* Multiple attn-busy interrupts indicate that the sides */
		/* are too far out of sync and are certainly not being   */
		/* received as part of valid mpc group negotiations.     */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): channel %s not added to group",
			CTCM_FUNTAIL, dev->name, ch->id);

done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ach = arg;
	struct net_device *dev = ach->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *wch = priv->channel[CTCM_WRITE];
	struct channel *rch = priv->channel[CTCM_READ];
	struct sk_buff *skb;
	struct th_sweep *header;
	int rc = 0;
	unsigned long saveflags = 0;

	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ach, ach->id);

	if (grp->in_sweep == 0)
		goto done;

	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n",
			__func__, wch->th_seq_num);
	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n",
			__func__, rch->th_seq_num);

	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
		/* give the previous IO time to complete */
		fsm_addtimer(&wch->sweep_timer,
			200, CTC_EVENT_RSWEEP_TIMER, wch);
		goto done;
	}

	skb = skb_dequeue(&wch->sweep_queue);
	if (!skb)
		goto done;

	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
		grp->in_sweep = 0;
		ctcm_clear_busy_do(dev);
		dev_kfree_skb_any(skb);
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	} else {
		atomic_inc(&skb->users);
		skb_queue_tail(&wch->io_queue, skb);
	}

	/* send out the sweep */
	wch->ccw[4].count = skb->len;

	header = (struct th_sweep *)skb->data;
	switch (header->th.th_ch_flag) {
	case TH_SWEEP_REQ:
		grp->sweep_req_pend_num--;
		break;
	case TH_SWEEP_RESP:
		grp->sweep_rsp_pend_num--;
		break;
	}

	header->sw.th_last_seq = wch->th_seq_num;

	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);

	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
	fsm_newstate(wch->fsm, CTC_STATE_TX);

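	/*
	 * Start the sweep channel program on the write channel under the
	 * ccw device lock. Once all pending sweep requests and responses
	 * have been accounted for, the sweep ends and both TH sequence
	 * counters are reset.
	 */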
	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
	wch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(wch->cdev, &wch->ccw[3],
					(unsigned long) wch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);

	if ((grp->sweep_req_pend_num == 0) &&
			(grp->sweep_rsp_pend_num == 0)) {
		grp->in_sweep = 0;
		rch->th_seq_num = 0x00;
		wch->th_seq_num = 0x00;
		ctcm_clear_busy_do(dev);
	}

	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n",
			__func__, wch->th_seq_num, rch->th_seq_num);

	if (rc != 0)
		ctcm_ccw_check_rc(wch, rc, "send sweep");

done:
	return;
}


/*
 * The ctcmpc statemachine for a channel.
 */

const fsm_node ctcmpc_ch_fsm[] = {
	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start },
	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },

	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },

	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },

	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
};

int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);

/*
 * Actions for the interface statemachine.
 */

/**
 * Start up the channels by sending CTC_EVENT_START to each of them.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	int direction;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_deltimer(&priv->restart_timer);
	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
	if (IS_MPC(priv))
		priv->mpcg->channels_terminating = 0;
	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
		struct channel *ch = priv->channel[direction];

		fsm_event(ch->fsm, CTC_EVENT_START, ch);
	}
}

/**
 * Shut down the channels by sending CTC_EVENT_STOP to each of them.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	int direction;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
		struct channel *ch = priv->channel[direction];

		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
		ch->th_seq_num = 0x00;
		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
				__func__, ch->th_seq_num);
	}
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
}

static void dev_action_restart(fsm_instance *fi, int event, void *arg)
{
	int restart_timer;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(TRACE, dev, "");

	if (IS_MPC(priv))
		restart_timer = CTCM_TIME_1_SEC;
	else
		restart_timer = CTCM_TIME_5_SEC;
	dev_info(&dev->dev, "Restarting device\n");

	dev_action_stop(fi, event, arg);
	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);

	/* Going back into the start sequence too quickly can   */
	/* result in the other side becoming unreachable due to */
	/* sense reported when IO is aborted.                   */
	fsm_addtimer(&priv->restart_timer, restart_timer,
			DEV_EVENT_START, dev);
}

/**
 * Called from the channel statemachine
 * when a channel is up and running.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	int dev_stat = fsm_getstate(fi);

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
			"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
			dev->name, dev->ml_priv, dev_stat, event);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT_RXTX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_RXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			dev_info(&dev->dev,
				"Connected with remote side\n");
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_TXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			dev_info(&dev->dev,
				"Connected with remote side\n");
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_TXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	}

	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXUP)
			mpc_channel_action(priv->channel[CTCM_READ],
				CTCM_READ, MPC_CHANNEL_ADD);
		else
			mpc_channel_action(priv->channel[CTCM_WRITE],
				CTCM_WRITE, MPC_CHANNEL_ADD);
	}
}

/**
 * Called from the channel statemachine
 * when a channel has been shut down.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RXTX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
		else
			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	}
	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXDOWN)
			mpc_channel_action(priv->channel[CTCM_READ],
				CTCM_READ, MPC_CHANNEL_REMOVE);
		else
			mpc_channel_action(priv->channel[CTCM_WRITE],
				CTCM_WRITE, MPC_CHANNEL_REMOVE);
	}
}

const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,		DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_RUNNING,		DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RESTART,	dev_action_restart },
};

int dev_fsm_len = ARRAY_SIZE(dev_fsm);

/* --- This is the END my friend --- */
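/*
 * Illustrative sketch (not part of the driver build): how FSM templates
 * like dev_fsm are typically wired up by the ctcm setup code using the
 * helpers from fsm.h.  The state/event count arguments below are assumed
 * placeholder names, not necessarily the constants defined in the ctcm
 * headers.
 *
 *	fsm_instance *fsm;
 *
 *	fsm = init_fsm("ctcmd", dev_state_names, dev_event_names,
 *		       nr_dev_states, nr_dev_events,	// assumed counts
 *		       dev_fsm, dev_fsm_len, GFP_KERNEL);
 *	if (fsm == NULL)
 *		return -ENOMEM;
 *	fsm_newstate(fsm, DEV_STATE_STOPPED);
 *
 *	// Events are then dispatched through the table above, e.g.:
 *	fsm_event(fsm, DEV_EVENT_START, dev);
 */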