// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2001, 2007
 * Authors:	Fritz Elfert (felfert@millenux.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 *	MPC additions :
 *		Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#define KMSG_COMPONENT "ctcm"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"

#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"

const char *dev_state_names[] = {
	[DEV_STATE_STOPPED]		= "Stopped",
	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
	[DEV_STATE_RUNNING]		= "Running",
};

const char *dev_event_names[] = {
	[DEV_EVENT_START]	= "Start",
	[DEV_EVENT_STOP]	= "Stop",
	[DEV_EVENT_RXUP]	= "RX up",
	[DEV_EVENT_TXUP]	= "TX up",
	[DEV_EVENT_RXDOWN]	= "RX down",
	[DEV_EVENT_TXDOWN]	= "TX down",
	[DEV_EVENT_RESTART]	= "Restart",
};

const char *ctc_ch_event_names[] = {
	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
	[CTC_EVENT_ATTN]	= "Status ATTN",
	[CTC_EVENT_BUSY]	= "Status BUSY",
	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
	[CTC_EVENT_IRQ]		= "IRQ normal",
	[CTC_EVENT_FINSTAT]	= "IRQ final",
	[CTC_EVENT_TIMER]	= "Timer",
	[CTC_EVENT_START]	= "Start",
	[CTC_EVENT_STOP]	= "Stop",
	/*
	 * additional MPC events
	 */
	[CTC_EVENT_SEND_XID]	= "XID Exchange",
	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};

const char *ctc_ch_state_names[] = {
	[CTC_STATE_IDLE]	= "Idle",
	[CTC_STATE_STOPPED]	= "Stopped",
	[CTC_STATE_STARTWAIT]	= "StartWait",
	[CTC_STATE_STARTRETRY]	= "StartRetry",
	[CTC_STATE_SETUPWAIT]	= "SetupWait",
	[CTC_STATE_RXINIT]	= "RX init",
	[CTC_STATE_TXINIT]	= "TX init",
	[CTC_STATE_RX]		= "RX",
	[CTC_STATE_TX]		= "TX",
	[CTC_STATE_RXIDLE]	= "RX idle",
	[CTC_STATE_TXIDLE]	= "TX idle",
	[CTC_STATE_RXERR]	= "RX error",
	[CTC_STATE_TXERR]	= "TX error",
	[CTC_STATE_TERM]	= "Terminating",
	[CTC_STATE_DTERM]	= "Restarting",
	[CTC_STATE_NOTOP]	= "Not operational",
	/*
	 * additional MPC states
	 */
	[CH_XID0_PENDING]	= "Pending XID0 Start",
	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
};

static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcm actions for channel statemachine -----
 *
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg);
static void chx_rx(fsm_instance *fi, int event, void *arg);
static void chx_rxidle(fsm_instance *fi, int event, void *arg);
static void chx_firstio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
 *
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
/* shared :
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
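
/*
 * Illustrative sketch (assumed behaviour of the helpers in fsm.h, not part
 * of the original file): every action declared above uses the generic FSM
 * action signature, and an event posted with fsm_event() is looked up in
 * the state/event jump table and dispatched to the matching routine,
 * roughly like this:
 *
 *	fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch);
 *	// -> chx_txdone(ch->fsm, CTC_EVENT_FINSTAT, ch) when the channel
 *	//    is currently in CTC_STATE_TX (see ch_fsm[] further below)
 */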

/**
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * ch	:	The channel the error belongs to.
 * Returns the error code (!= 0) to inspect.
 */
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s: %04x\n",
			CTCM_FUNTAIL, ch->id, msg, rc);
	switch (rc) {
	case -EBUSY:
		pr_info("%s: The communication peer is busy\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		pr_err("%s: The specified target device is not valid\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		pr_err("An I/O operation resulted in error %04x\n",
			rc);
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}

void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);

	while ((skb = skb_dequeue(q))) {
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/**
 * NOP action for statemachines
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions for channel - statemachines.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	unsigned long done_stamp = jiffies;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	duration = done_stamp - ch->prof.send_stamp;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len -
						LL_HEADER_LENGTH;
			refcount_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = jiffies;
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}

/**
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}

/**
 * Got normal data, check for sanity, queue it up, allocate a new buffer,
 * trigger the bottom half, and initiate the next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	if (len < 8) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d < 8\n",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d > %d\n",
			CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got block length %d != rx length %d\n",
			CTCM_FUNTAIL, dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (block_len > 2) {
		*((__u16 *)skb->data) = block_len - 2;
		ctcm_unpack_skb(ch, skb);
	}
again:
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	int rc;
	struct channel *ch = arg;
	int fsmstate = fsm_getstate(fi);

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s(%s) : %02x",
		CTCM_FUNTAIL, ch->id, fsmstate);

	ch->sense_rc = 0;	/* reset unit check report control */
	if (fsmstate == CTC_STATE_TXIDLE)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): remote side issued READ?, init.\n",
			CTCM_FUNTAIL, ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->ml_priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}
	/*
	 * Don't set up a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode, since we don't set up a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 buflen;
	int rc;

	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *)ch->trans_skb->data);
	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
			__func__, dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
				__func__, dev->name,
				buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}

/**
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		timeout = 1500;
		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
				__func__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is nondeterministic in
		 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/**
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags;
	int rc;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
		CTCM_FUNTAIL, ch->id,
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
"RX" : "TX"); 597 598 if (ch->trans_skb != NULL) { 599 clear_normalized_cda(&ch->ccw[1]); 600 dev_kfree_skb(ch->trans_skb); 601 ch->trans_skb = NULL; 602 } 603 if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { 604 ch->ccw[1].cmd_code = CCW_CMD_READ; 605 ch->ccw[1].flags = CCW_FLAG_SLI; 606 ch->ccw[1].count = 0; 607 } else { 608 ch->ccw[1].cmd_code = CCW_CMD_WRITE; 609 ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 610 ch->ccw[1].count = 0; 611 } 612 if (ctcm_checkalloc_buffer(ch)) { 613 CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, 614 "%s(%s): %s trans_skb alloc delayed " 615 "until first transfer", 616 CTCM_FUNTAIL, ch->id, 617 (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 618 "RX" : "TX"); 619 } 620 ch->ccw[0].cmd_code = CCW_CMD_PREPARE; 621 ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; 622 ch->ccw[0].count = 0; 623 ch->ccw[0].cda = 0; 624 ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */ 625 ch->ccw[2].flags = CCW_FLAG_SLI; 626 ch->ccw[2].count = 0; 627 ch->ccw[2].cda = 0; 628 memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3); 629 ch->ccw[4].cda = 0; 630 ch->ccw[4].flags &= ~CCW_FLAG_IDA; 631 632 fsm_newstate(fi, CTC_STATE_STARTWAIT); 633 fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); 634 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); 635 rc = ccw_device_halt(ch->cdev, 0); 636 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); 637 if (rc != 0) { 638 if (rc != -EBUSY) 639 fsm_deltimer(&ch->timer); 640 ctcm_ccw_check_rc(ch, rc, "initial HaltIO"); 641 } 642 } 643 644 /** 645 * Shutdown a channel. 646 * 647 * fi An instance of a channel statemachine. 648 * event The event, just happened. 649 * arg Generic pointer, casted from channel * upon call. 650 */ 651 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) 652 { 653 struct channel *ch = arg; 654 unsigned long saveflags = 0; 655 int rc; 656 int oldstate; 657 658 fsm_deltimer(&ch->timer); 659 if (IS_MPC(ch)) 660 fsm_deltimer(&ch->sweep_timer); 661 662 fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); 663 664 if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ 665 spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); 666 /* Such conditional locking is undeterministic in 667 * static view. => ignore sparse warnings here. */ 668 oldstate = fsm_getstate(fi); 669 fsm_newstate(fi, CTC_STATE_TERM); 670 rc = ccw_device_halt(ch->cdev, 0); 671 672 if (event == CTC_EVENT_STOP) 673 spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); 674 /* see remark above about conditional locking */ 675 676 if (rc != 0 && rc != -EBUSY) { 677 fsm_deltimer(&ch->timer); 678 if (event != CTC_EVENT_STOP) { 679 fsm_newstate(fi, oldstate); 680 ctcm_ccw_check_rc(ch, rc, (char *)__func__); 681 } 682 } 683 } 684 685 /** 686 * Cleanup helper for chx_fail and chx_stopped 687 * cleanup channels queue and notify interface statemachine. 688 * 689 * fi An instance of a channel statemachine. 690 * state The next state (depending on caller). 691 * ch The channel to operate on. 
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
			"%s(%s): %s[%d]\n",
			CTCM_FUNTAIL, dev->name, ch->id, state);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * A channel has successfully been halted.
 * Clean up its queue and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}

/**
 * A stop command from the device statemachine arrived and we are in
 * the not operational state. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}

/**
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Clean up the queue and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}

/**
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that the remote side isn't set up. In this case
	 * simply retry after some 10 secs...
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) &&
		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
			int rc = ccw_device_halt(ch->cdev, 0);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s(%s) : %s error during %s channel setup state=%s\n",
		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/**
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s[%d] of %s\n",
			CTCM_FUNTAIL, ch->id, event, dev->name);

	fsm_deltimer(&ch->timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because it's nondeterministic in static view.
		 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, 0);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}

/**
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/**
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s(%s): RX %s busy, init. fail",
			CTCM_FUNTAIL, dev->name, ch->id);
	fsm_newstate(fi, CTC_STATE_RXERR);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}

/**
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s: %s: remote disconnect - re-init ...",
			CTCM_FUNTAIL, dev->name);
	fsm_deltimer(&ch->timer);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[CTCM_WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, 0);
	ccw_device_halt(ch2->cdev, 0);
}

/**
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/**
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;

	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: retries exceeded",
					CTCM_FUNTAIL, ch->id);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready.
		 * use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
		"%s : %s: retry %d",
			CTCM_FUNTAIL, ch->id, ch->retry);
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: IDAL alloc failed",
					CTCM_FUNTAIL, ch->id);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER)	/* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
			/* Such conditional locking is a known problem for
			 * sparse because it's nondeterministic in static view.
			 * Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}

/**
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int rd = CHANNEL_DIRECTION(ch->flags);

	fsm_deltimer(&ch->timer);
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s: %s: %s unrecoverable channel error",
			CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");

	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}
	if (rd == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * The ctcm statemachine for a channel.
 */
const fsm_node ch_fsm[] = {
	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start },
	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },

	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start },

	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx },

	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
};

int ch_fsm_len = ARRAY_SIZE(ch_fsm);

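/*
 * Illustrative sketch only (not from this file): how the jump table above
 * is typically consumed.  Channel setup code registers it, together with
 * the name arrays at the top of this file, with the generic FSM allocator
 * from fsm.h; the constants CTC_NR_STATES and CTC_NR_EVENTS are assumed to
 * come from ctcm_main.h:
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 *			   CTC_NR_STATES, CTC_NR_EVENTS,
 *			   ch_fsm, ch_fsm_len, GFP_KERNEL);
 *	if (ch->fsm)
 *		fsm_newstate(ch->fsm, CTC_STATE_IDLE);
 */
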
/*
 * MPC actions for mpc channel statemachine
 * handling of MPC protocol requires extra
 * statemachine and actions which are prefixed ctcmpc_ .
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
 * which are expanded by some elements.
 */

/*
 * Actions for mpc channel statemachine.
 */

/**
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb;
	int first = 1;
	int i;
	__u32 data_space;
	unsigned long duration;
	struct sk_buff *peekskb;
	int rc;
	struct th_header *header;
	struct pdu *p_header;
	unsigned long done_stamp = jiffies;

	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
			__func__, dev->name, smp_processor_id());

	duration = done_stamp - ch->prof.send_stamp;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch)) {
		spin_unlock(&ch->collect_lock);
		goto done;
	}
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
	i = 0;
	p_header = NULL;
	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;

	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
			" data_space:%04x\n",
			__func__, data_space);

	while ((skb = skb_dequeue(&ch->collect_queue))) {
		skb_put_data(ch->trans_skb, skb->data, skb->len);
		p_header = (struct pdu *)
			(skb_tail_pointer(ch->trans_skb) - skb->len);
		p_header->pdu_flag = 0x00;
		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
			p_header->pdu_flag |= 0x60;
		else
			p_header->pdu_flag |= 0x20;

		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
				__func__, ch->trans_skb->len);
		CTCM_PR_DBGDATA("%s: pdu header and data for up"
				" to 32 bytes sent to vtam\n", __func__);
		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));

		ch->collect_len -= skb->len;
		data_space -= skb->len;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		if (!peekskb || peekskb->len > data_space)
			break;
		i++;
	}
	/* p_header points to the last one we handled */
	if (p_header)
		p_header->pdu_flag |= PDU_LAST;	/* Say it's the last one */
	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
	if (!header) {
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}
	header->th_ch_flag = TH_HAS_PDU;	/* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n",
			__func__, ch->th_seq_num);

	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
		TH_HEADER_LENGTH);	/* put the TH on the packet */

	kfree(header);

	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
			__func__, ch->trans_skb->len);
	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
			"data to vtam from collect_q\n", __func__);
	CTCM_D3_DUMP((char *)ch->trans_skb->data,
			min_t(int, ch->trans_skb->len, 50));

	spin_unlock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[1]);

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(unsigned long)ch->ccw[1].cda,
			ch->trans_skb->data);
	ch->ccw[1].count = ch->max_bufsize;

	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
			"%s: %s: IDAL alloc failed",
			CTCM_FUNTAIL, ch->id);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		return;
	}

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(unsigned long)ch->ccw[1].cda,
			ch->trans_skb->data);

	ch->ccw[1].count = ch->trans_skb->len;
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	ch->prof.send_stamp = jiffies;
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	ch->prof.doios_multi++;
	if (rc != 0) {
		priv->stats.tx_dropped += i;
		priv->stats.tx_errors += i;
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "chained TX");
	}
done:
	ctcm_clear_busy(dev);
	return;
}

/**
 * Got normal data, check for sanity, queue it up, allocate a new buffer,
 * trigger the bottom half, and initiate the next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb = ch->trans_skb;
	struct sk_buff *new_skb;
	unsigned long saveflags = 0;	/* avoids compiler warning */
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;

	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
			CTCM_FUNTAIL, dev->name, smp_processor_id(),
			ch->id, ch->max_bufsize, len);
	fsm_deltimer(&ch->timer);

	if (skb == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): TRANS_SKB = NULL",
			CTCM_FUNTAIL, dev->name);
		goto again;
	}

	if (len < TH_HEADER_LENGTH) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): packet length %d too short",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
	} else {
		/* must have valid th header or game over */
		__u32 block_len = len;
		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);

		if (new_skb == NULL) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): skb allocation failed",
				CTCM_FUNTAIL, dev->name);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto again;
		}
		switch (fsm_getstate(grp->fsm)) {
		case MPCG_STATE_RESET:
		case MPCG_STATE_INOP:
			dev_kfree_skb_any(new_skb);
			break;
		case MPCG_STATE_FLOWC:
		case MPCG_STATE_READY:
			skb_put_data(new_skb, skb->data, block_len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_schedule(&ch->ch_tasklet);
			break;
		default:
			skb_put_data(new_skb, skb->data, len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_hi_schedule(&ch->ch_tasklet);
			break;
		}
	}

again:
	switch (fsm_getstate(grp->fsm)) {
	int rc, dolock;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			break;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		dolock = !in_irq();
		if (dolock)
			spin_lock_irqsave(
				get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (dolock) /* see remark about conditional locking */
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0)
			ctcm_ccw_check_rc(ch, rc, "normal RX");
	default:
		break;
	}

	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
			__func__, dev->name, ch, ch->id);

}

/**
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *gptr = priv->mpcg;

	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
			fsm_getstate(gptr->fsm), ch->protocol);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");

	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		goto done;

	switch (fsm_getstate(fi)) {
	case CTC_STATE_STARTRETRY:
	case CTC_STATE_SETUPWAIT:
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			ctcmpc_chx_rxidle(fi, event, arg);
		} else {
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		goto done;
	default:
		break;
	}

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);

done:
	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);
	return;
}

/**
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from channel * upon call.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	fsm_deltimer(&ch->timer);
	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
			__func__, ch->id, dev->name, smp_processor_id(),
			fsm_getstate(fi), fsm_getstate(grp->fsm));

	fsm_newstate(fi, CTC_STATE_RXIDLE);
	/* XID processing complete */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			goto done;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
		if (event == CTC_EVENT_START)
			/* see remark about conditional locking */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (event == CTC_EVENT_START)
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
			goto done;
		}
		break;
	default:
		break;
	}

	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
		__func__, dev->name, ch->id, ch, smp_processor_id(),
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* ok..start yside xid exchanges */
		if (!ch->in_mpcgroup)
			break;
		if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
			fsm_deltimer(&grp->timer);
			fsm_addtimer(&grp->timer,
				MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);

		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* attn rcvd before xid0 processed via bh */
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
		/* attn rcvd before xid0 processed on ch
		 * but mid-xid0 processing for group */
		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
		switch (fsm_getstate(ch->fsm)) {
		case CH_XID7_PENDING:
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
			break;
		case CH_XID7_PENDING2:
			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
			break;
		}
		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
		break;
	}

	return;
}

/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
		__func__, dev->name, ch->id,
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. start yside xid exchanges */
		/* only receive one attn-busy at a time so must not    */
		/* change state each time			        */
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn	  */
		/* so must report failure instead of reverting	  */
		/* back to ready-for-xid passive state		  */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid	  */
		/* collisions so yside must have been triggered	  */
		/* by an ATTN that was not intended to start XID  */
		/* processing. Revert back to ready-for-xid and	  */
		/* wait for ATTN interrupt to signal xid start	  */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for second
		 * channel. Send yside xid for second channel.

/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
		__func__, dev->name, ch->id,
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* VTAM wants to be primary; start yside xid exchanges. */
		/* Only one attn-busy is received at a time, so we must */
		/* not change state each time.				*/
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* The process began via a call to establish_conn,	*/
		/* so we must report failure instead of reverting	*/
		/* back to the ready-for-xid passive state.		*/
		if (grp->estconnfunc)
			goto done;
		/* This attn-busy is NOT the result of xside xid	*/
		/* collisions, so yside must have been triggered by an	*/
		/* ATTN that was not intended to start XID processing.	*/
		/* Revert back to ready-for-xid and wait for an ATTN	*/
		/* interrupt to signal xid start.			*/
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for the second
		 * channel. Send yside xid for the second channel.
		 */
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		fallthrough;
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/* Multiple attn-busy indicates the channels are too	*/
		/* far out of sync; they are certainly not being	*/
		/* received as part of valid mpc group negotiations.	*/
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
			     MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): channel %s not added to group",
			CTCM_FUNTAIL, dev->name, ch->id);

done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ach = arg;
	struct net_device *dev = ach->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *wch = priv->channel[CTCM_WRITE];
	struct channel *rch = priv->channel[CTCM_READ];
	struct sk_buff *skb;
	struct th_sweep *header;
	int rc = 0;
	unsigned long saveflags = 0;

	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ach, ach->id);

	if (grp->in_sweep == 0)
		goto done;

	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n",
			__func__, wch->th_seq_num);
	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n",
			__func__, rch->th_seq_num);

	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
		/* give the previous IO time to complete */
		fsm_addtimer(&wch->sweep_timer,
			200, CTC_EVENT_RSWEEP_TIMER, wch);
		goto done;
	}

	skb = skb_dequeue(&wch->sweep_queue);
	if (!skb)
		goto done;

	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
		grp->in_sweep = 0;
		ctcm_clear_busy_do(dev);
		dev_kfree_skb_any(skb);
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	} else {
		refcount_inc(&skb->users);
		skb_queue_tail(&wch->io_queue, skb);
	}

	/* send out the sweep */
	wch->ccw[4].count = skb->len;

	header = (struct th_sweep *)skb->data;
	switch (header->th.th_ch_flag) {
	case TH_SWEEP_REQ:
		grp->sweep_req_pend_num--;
		break;
	case TH_SWEEP_RESP:
		grp->sweep_rsp_pend_num--;
		break;
	}

	header->sw.th_last_seq = wch->th_seq_num;

	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);

	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
	fsm_newstate(wch->fsm, CTC_STATE_TX);

	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
	wch->prof.send_stamp = jiffies;
	rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);

	if ((grp->sweep_req_pend_num == 0) &&
	    (grp->sweep_rsp_pend_num == 0)) {
		grp->in_sweep = 0;
		rch->th_seq_num = 0x00;
		wch->th_seq_num = 0x00;
		ctcm_clear_busy_do(dev);
	}

	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n",
			__func__, wch->th_seq_num, rch->th_seq_num);

	if (rc != 0)
		ctcm_ccw_check_rc(wch, rc, "send sweep");

done:
	return;
}
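
/*
 * Sweep bookkeeping in ctcmpc_chx_send_sweep(), compressed: while
 * grp->in_sweep is set, every TH_SWEEP_REQ/TH_SWEEP_RESP frame handed
 * to ccw_device_start() decrements its pending counter, and once both
 * counters reach zero the sweep ends and the TH sequence numbers
 * restart:
 *
 *	if (grp->sweep_req_pend_num == 0 && grp->sweep_rsp_pend_num == 0) {
 *		grp->in_sweep = 0;
 *		rch->th_seq_num = 0x00;
 *		wch->th_seq_num = 0x00;
 *		ctcm_clear_busy_do(dev);
 *	}
 *
 * If the write channel is not yet CTC_STATE_TXIDLE, the action simply
 * re-arms wch->sweep_timer (200 ms) and retries on the next
 * CTC_EVENT_RSWEEP_TIMER.
 */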

/*
 * The ctcmpc statemachine for a channel.
 */

const fsm_node ctcmpc_ch_fsm[] = {
	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start },
	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },

	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },

	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },

	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
};

int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
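
/*
 * Usage sketch for a table/length pair such as ctcmpc_ch_fsm and
 * mpc_ch_fsm_len, assuming the init_fsm() helper declared in fsm.h;
 * the state/event count constants shown here are illustrative and the
 * names actually used by the driver may differ:
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 *			   CTC_MPC_NR_STATES, CTC_MPC_NR_EVENTS,
 *			   ctcmpc_ch_fsm, mpc_ch_fsm_len, GFP_KERNEL);
 *	if (ch->fsm == NULL)
 *		return -ENOMEM;
 *	fsm_newstate(ch->fsm, CTC_STATE_IDLE);
 *
 * Each { state, event, action } row fills one slot of the jump table
 * built by init_fsm(); fsm_event() then dispatches on the current
 * state and the posted event.
 */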

/*
 * Actions for interface - statemachine.
 */

/**
 * Startup channels by sending CTC_EVENT_START to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	int direction;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_deltimer(&priv->restart_timer);
	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
	if (IS_MPC(priv))
		priv->mpcg->channels_terminating = 0;
	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_START, ch);
	}
}
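
/*
 * DEV_EVENT_START is normally injected into this interface
 * statemachine from the net_device open path elsewhere in the driver;
 * roughly (example_open is a hypothetical name, for illustration only):
 *
 *	static int example_open(struct net_device *dev)
 *	{
 *		struct ctcm_priv *priv = dev->ml_priv;
 *
 *		fsm_event(priv->fsm, DEV_EVENT_START, dev);
 *		return 0;
 *	}
 *
 * dev_action_start() then fans the request out as CTC_EVENT_START to
 * the read and write channel statemachines, which answer with
 * DEV_EVENT_RXUP / DEV_EVENT_TXUP once their channels come up.
 */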

/**
 * Shutdown channels by sending CTC_EVENT_STOP to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	int direction;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
		ch->th_seq_num = 0x00;
		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
				__func__, ch->th_seq_num);
	}
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
}

static void dev_action_restart(fsm_instance *fi, int event, void *arg)
{
	int restart_timer;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(TRACE, dev, "");

	if (IS_MPC(priv))
		restart_timer = CTCM_TIME_1_SEC;
	else
		restart_timer = CTCM_TIME_5_SEC;

	dev_info(&dev->dev, "Restarting device\n");

	dev_action_stop(fi, event, arg);
	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);

	/* Going back into the start sequence too quickly can	*/
	/* result in the other side becoming unreachable, due	*/
	/* to sense reported when IO is aborted.		*/
	fsm_addtimer(&priv->restart_timer, restart_timer,
		     DEV_EVENT_START, dev);
}

/**
 * Called from channel statemachine
 * when a channel is up and running.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	int dev_stat = fsm_getstate(fi);

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
		"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
		dev->name, dev->ml_priv, dev_stat, event);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT_RXTX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_RXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			dev_info(&dev->dev,
				"Connected with remote side\n");
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_TXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			dev_info(&dev->dev,
				"Connected with remote side\n");
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_TXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	}

	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXUP)
			mpc_channel_action(priv->channel[CTCM_READ],
				CTCM_READ, MPC_CHANNEL_ADD);
		else
			mpc_channel_action(priv->channel[CTCM_WRITE],
				CTCM_WRITE, MPC_CHANNEL_ADD);
	}
}

/**
 * Called from channel statemachine
 * when a channel has been shutdown.
 *
 * fi		An instance of an interface statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RXTX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
		else
			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	}
	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXDOWN)
			mpc_channel_action(priv->channel[CTCM_READ],
				CTCM_READ, MPC_CHANNEL_REMOVE);
		else
			mpc_channel_action(priv->channel[CTCM_WRITE],
				CTCM_WRITE, MPC_CHANNEL_REMOVE);
	}
}
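
/*
 * Typical bring-up sequence as seen by dev_action_chup() above,
 * starting from DEV_STATE_STARTWAIT_RXTX:
 *
 *	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
 *		-> DEV_STATE_STARTWAIT_TX   (RX up, TX still pending)
 *	fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
 *		-> DEV_STATE_RUNNING        ("Connected with remote side")
 *
 * dev_action_chdown() walks the ladder in the other direction, e.g. a
 * DEV_EVENT_TXDOWN while in DEV_STATE_RUNNING falls back to
 * DEV_STATE_STARTWAIT_TX until the write channel reports up again.
 */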

const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,		DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_RUNNING,		DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RESTART,	dev_action_restart },
};

int dev_fsm_len = ARRAY_SIZE(dev_fsm);

/* --- This is the END my friend --- */