/*
 * ssi_protocol.c
 *
 * Implementation of the SSI McSAAB improved protocol.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/if_phonet.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>

void ssi_waketest(struct hsi_client *cl, unsigned int enable);

#define SSIP_TXQUEUE_LEN	100
#define SSIP_MAX_MTU		65535
#define SSIP_DEFAULT_MTU	4000
#define PN_MEDIA_SOS		21
#define SSIP_MIN_PN_HDR		6	/* FIXME: Revisit */
#define SSIP_WDTOUT		2000	/* FIXME: has to be 500 msecs */
#define SSIP_KATOUT		15	/* 15 msecs */
#define SSIP_MAX_CMDS		5 /* Number of pre-allocated command buffers */
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
#define SSIP_CMT_LOADER_SYNC	0x11223344
/*
 * SSI protocol command definitions
 */
#define SSIP_COMMAND(data)	((data) >> 28)
#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
/* Commands */
#define SSIP_SW_BREAK		0
#define SSIP_BOOTINFO_REQ	1
#define SSIP_BOOTINFO_RESP	2
#define SSIP_WAKETEST_RESULT	3
#define SSIP_START_TRANS	4
#define SSIP_READY		5
/* Payloads */
#define SSIP_DATA_VERSION(data)	((data) & 0xff)
#define SSIP_LOCAL_VERID	1
#define SSIP_WAKETEST_OK	0
#define SSIP_WAKETEST_FAILED	1
#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff)
#define SSIP_MSG_ID(data)	((data) & 0xff)
/* Generic Command */
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
/* Commands for the control channel */
#define SSIP_BOOTINFO_REQ_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
#define SSIP_BOOTINFO_RESP_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
#define SSIP_START_TRANS_CMD(pdulen, id) \
		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)
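
/*
 * Control-channel commands are single 32-bit words: the top 4 bits carry
 * the command id and the low 28 bits the payload. A few examples, derived
 * from the macros above:
 *
 *   SSIP_BOOTINFO_REQ_CMD(1)   == 0x10000001  (cmd 1, protocol version 1)
 *   SSIP_START_TRANS_CMD(2, 3) == 0x40000203  (cmd 4, 2 frames, msg id 3)
 *   SSIP_READY_CMD             == 0x50000000
 *   SSIP_SWBREAK_CMD           == 0x00000000
 */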

/* Main state machine states */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};

/* Send state machine states */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};

/* Receive state machine states */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};

/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @waketest: Flag to follow wake line test
 * @rxid: RX data id
 * @txid: TX data id
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track of the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned int		waketest:1;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive; /* wake-up workaround */
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};

/* List of ssi protocol instances */
static LIST_HEAD(ssip_list);

static void ssip_rxcmd_complete(struct hsi_msg *msg);

static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 ssip_get_cmd(struct hsi_msg *msg)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);

	return *data;
}

static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
	}
}

static void ssip_free_data(struct hsi_msg *msg)
{
	struct sk_buff *skb;

	skb = msg->context;
	pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
								skb);
	msg->destructor = NULL;
	dev_kfree_skb(skb);
	hsi_free_msg(msg);
}

static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
					struct sk_buff *skb, gfp_t flags)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
	if (!msg)
		return NULL;
	ssip_skb_to_msg(skb, msg);
	msg->destructor = ssip_free_data;
	msg->channel = ssi->channel_id_data;
	msg->context = skb;

	return msg;
}

static inline void ssip_release_cmd(struct hsi_msg *msg)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);

	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
	spin_lock_bh(&ssi->lock);
	list_add_tail(&msg->link, &ssi->cmdqueue);
	spin_unlock_bh(&ssi->lock);
}
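
/*
 * Control commands are drawn from a small pool of SSIP_MAX_CMDS messages
 * pre-allocated at probe time (see ssip_alloc_cmds()), so no memory
 * allocation is needed on the command hot path. A claimed command must be
 * handed back with ssip_release_cmd(), either explicitly from its
 * completion callback or through the message destructor.
 */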

static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	spin_lock_bh(&ssi->lock);
	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	spin_unlock_bh(&ssi->lock);
	msg->destructor = ssip_release_cmd;

	return msg;
}

static void ssip_free_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

static int ssip_alloc_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	for (i = 0; i < SSIP_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = ssi->channel_id_cmd;
		list_add_tail(&msg->link, &ssi->cmdqueue);
	}

	return 0;
out:
	ssip_free_cmds(ssi);

	return -ENOMEM;
}

static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		/* Otherwise fall through */
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
						msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
				jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
{
	struct hsi_client *master = ERR_PTR(-ENODEV);
	struct ssi_protocol *ssi;

	list_for_each_entry(ssi, &ssip_list, link)
		if (slave->device.parent == ssi->cl->device.parent) {
			master = ssi->cl;
			break;
		}

	return master;
}
EXPORT_SYMBOL_GPL(ssip_slave_get_master);
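
/*
 * Wake line handling on behalf of slave clients (e.g. cmt-speech): each
 * slave start/stop pair is refcounted in tx_usecnt, so the wake line is
 * only dropped once neither the protocol itself nor any slave needs it.
 */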

int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);

int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);

int ssip_slave_running(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	return netif_running(ssi->netdev);
}
EXPORT_SYMBOL_GPL(ssip_slave_running);

static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	if (ssi->waketest)
		ssi_waketest(cl, 0);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->waketest = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}

static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
							"Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n", ssi->waketest);
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}

static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
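
/*
 * Timer callbacks. The rx_wd/tx_wd watchdogs fire when a transfer phase
 * stalls for SSIP_WDTOUT msecs and force a protocol reset via ssip_error().
 * The keep_alive timer (documented above only as a workaround for an SSI
 * HW bug) re-arms itself every SSIP_KATOUT msecs for as long as a transfer
 * is in flight.
 */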

static void ssip_keep_alive(unsigned long data)
{
	struct hsi_client *cl = (struct hsi_client *)data;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			/*
			 * Fall through. Workaround for cmt-speech;
			 * in that case we rely on audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}

static void ssip_wd(unsigned long data)
{
	struct hsi_client *cl = (struct hsi_client *)data;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}

static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
	msg->complete = ssip_release_cmd;
	hsi_async_write(cl, msg);
	dev_dbg(&cl->device, "Issuing RX command\n");
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}

static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
						ssi->recv_state);
	spin_lock(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low-high
	 * transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		if (ssi->main_state == INIT) {
			ssi->main_state = HANDSHAKE;
			spin_unlock(&ssi->lock);
			ssip_send_bootinfo_req_cmd(cl);
		} else {
			spin_unlock(&ssi->lock);
		}
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}

static void ssip_stop_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
	spin_lock(&ssi->lock);
	if (likely(ssi->main_state == ACTIVE))
		ssip_set_rxstate(ssi, RECV_IDLE);
	spin_unlock(&ssi->lock);
}

static void ssip_free_strans(struct hsi_msg *msg)
{
	ssip_free_data(msg->context);
	ssip_release_cmd(msg);
}

static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock(&ssi->lock);
	hsi_async_write(cl, data);
}
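
/*
 * TX data path: ssip_xmit() pops the next skb off the txqueue, sends a
 * START_TRANS command carrying the frame count and a rolling message id,
 * and only once that command completes (ssip_strans_complete()) writes the
 * actual data frames. ssip_tx_data_complete() then either starts the next
 * queued transfer or terminates the burst with a SW_BREAK command.
 */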

static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
							ssi->txid));
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
					SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}

/* In soft IRQ context */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
			((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	__skb_pull(skb, 1);
	netif_rx(skb);
}

static void ssip_rx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX data error\n");
		ssip_free_data(msg);
		ssip_error(cl);
		return;
	}
	del_timer(&ssi->rx_wd); /* FIXME: Revisit */
	skb = msg->context;
	ssip_pn_rx(skb);
	hsi_free_msg(msg);
}
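
/*
 * Boot handshake: from INIT, either a wake-line toggle or an incoming
 * BOOTINFO_REQ moves the state machine to HANDSHAKE, where boot info is
 * exchanged and the wake lines are tested (the ssi_waketest() calls are
 * marked FIXME / to be removed). A successful WAKETEST_RESULT switches the
 * protocol to ACTIVE and brings the Phonet carrier up.
 */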

static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaround: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		/* Fall through */
	case INIT:
		spin_lock(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		if (!ssi->waketest) {
			ssi->waketest = 1;
			ssi_waketest(cl, 1); /* FIXME: To be removed */
		}
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	case HANDSHAKE:
		/* Ignore */
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}

static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use tx_wd as a boot watchdog in non ACTIVE state */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
							ssi->main_state);
	spin_unlock(&ssi->lock);
}

static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
							ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	if (ssi->waketest) {
		ssi->waketest = 0;
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	}
	ssi->main_state = ACTIVE;
	del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
		wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}

static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock(&ssi->lock);
	ssip_xmit(cl);
}

static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
					SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb->dev = ssi->netdev;
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}

static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}
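
/*
 * A SW_BREAK closes a TX burst. Once it has gone out, the wake line is
 * released (SEND_IDLE) unless a slave still holds a tx_usecnt reference,
 * in which case the link stays in SEND_READY for the next transfer.
 */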

static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock(&ssi->lock);
	} else {
		spin_unlock(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}

static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}

void ssip_port_event(struct hsi_client *cl, unsigned long event)
{
	switch (event) {
	case HSI_EVENT_START_RX:
		ssip_start_rx(cl);
		break;
	case HSI_EVENT_STOP_RX:
		ssip_stop_rx(cl);
		break;
	default:
		return;
	}
}

static int ssip_pn_open(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	int err;

	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device, "SSI port already claimed\n");
		return err;
	}
	err = hsi_register_port_event(cl, ssip_port_event);
	if (err < 0) {
		dev_err(&cl->device, "Register HSI port event failed (%d)\n",
			err);
		return err;
	}
	dev_dbg(&cl->device, "Configuring SSI port\n");
	hsi_setup(cl);
	spin_lock_bh(&ssi->lock);
	if (!ssi->waketest) {
		ssi->waketest = 1;
		ssi_waketest(cl, 1); /* FIXME: To be removed */
	}
	ssi->main_state = INIT;
	spin_unlock_bh(&ssi->lock);

	return 0;
}

static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}

static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;

	return 0;
}
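
/*
 * Phonet TX entry point. Outgoing skbs are padded to a 32-bit boundary
 * (SSI transfers whole 32-bit frames) and the 16-bit Phonet length field
 * is byte-swapped before transmission; ssip_pn_rx() applies the reverse
 * swap on the receive side.
 */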

static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	if ((skb->protocol != htons(ETH_P_PHONET)) ||
					(skb->len < SSIP_MIN_PN_HDR))
		goto drop;
	/* Pad to 32-bits - FIXME: Revisit */
	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
		goto drop;

	/*
	 * Modem sends Phonet messages over SSI with its own endianness...
	 * Assume that the modem has the same endianness as we do.
	 */
	if (skb_cow_head(skb, 0))
		goto drop;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);

	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (!msg) {
		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
		goto drop;
	}
	msg->complete = ssip_tx_data_complete;

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
		goto drop2;
	}
	list_add_tail(&msg->link, &ssi->txqueue);
	ssi->txqueue_len++;
	if (dev->tx_queue_len < ssi->txqueue_len) {
		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
		netif_stop_queue(dev);
	}
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
		hsi_start_tx(cl);
	} else if (ssi->send_state == SEND_READY) {
		/* Needed for cmt-speech workaround */
		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
							ssi->txqueue_len);
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	} else {
		spin_unlock_bh(&ssi->lock);
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return 0;
drop2:
	hsi_free_msg(msg);
drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);

	return 0;
}

/* CMT reset event handler */
void ssip_reset_event(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	dev_err(&ssi->cl->device, "CMT reset detected!\n");
	ssip_error(ssi->cl);
}
EXPORT_SYMBOL_GPL(ssip_reset_event);

static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
	.ndo_change_mtu	= ssip_pn_set_mtu,
};

static void ssip_pn_setup(struct net_device *dev)
{
	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->dev_addr[0]	= PN_MEDIA_SOS;
	dev->addr_len		= 1;
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	dev->destructor		= free_netdev;
	dev->header_ops		= &phonet_header_ops;
}
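
/*
 * Driver probe/remove. The HSI port description is expected to name two
 * channels for this client, "mcsaab-control" and "mcsaab-data", which are
 * looked up by name below and used for the command and data streams
 * respectively.
 */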

static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi) {
		dev_err(dev, "No memory for ssi protocol\n");
		return -ENOMEM;
	}

	spin_lock_init(&ssi->lock);
	init_timer_deferrable(&ssi->rx_wd);
	init_timer_deferrable(&ssi->tx_wd);
	init_timer(&ssi->keep_alive);
	ssi->rx_wd.data = (unsigned long)cl;
	ssi->rx_wd.function = ssip_wd;
	ssi->tx_wd.data = (unsigned long)cl;
	ssi->tx_wd.function = ssip_wd;
	ssi->keep_alive.data = (unsigned long)cl;
	ssi->keep_alive.function = ssip_keep_alive;
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	SET_NETDEV_DEV(ssi->netdev, dev);
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
		ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}

static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}

static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};

static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);

static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);

MODULE_ALIAS("hsi:ssi-protocol");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
MODULE_LICENSE("GPL");