/*
 * ssi_protocol.c
 *
 * Implementation of the SSI McSAAB improved protocol.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/if_phonet.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>

void ssi_waketest(struct hsi_client *cl, unsigned int enable);

#define SSIP_TXQUEUE_LEN	100
#define SSIP_MAX_MTU		65535
#define SSIP_DEFAULT_MTU	4000
#define PN_MEDIA_SOS		21
#define SSIP_MIN_PN_HDR		6	/* FIXME: Revisit */
#define SSIP_WDTOUT		2000	/* FIXME: has to be 500 msecs */
#define SSIP_KATOUT		15	/* 15 msecs */
#define SSIP_MAX_CMDS		5 /* Number of pre-allocated command buffers */
#define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1)
#define SSIP_CMT_LOADER_SYNC	0x11223344
/*
 * SSI protocol command definitions
 */
#define SSIP_COMMAND(data)	((data) >> 28)
#define SSIP_PAYLOAD(data)	((data) & 0xfffffff)
/* Commands */
#define SSIP_SW_BREAK		0
#define SSIP_BOOTINFO_REQ	1
#define SSIP_BOOTINFO_RESP	2
#define SSIP_WAKETEST_RESULT	3
#define SSIP_START_TRANS	4
#define SSIP_READY		5
/* Payloads */
#define SSIP_DATA_VERSION(data)	((data) & 0xff)
#define SSIP_LOCAL_VERID	1
#define SSIP_WAKETEST_OK	0
#define SSIP_WAKETEST_FAILED	1
#define SSIP_PDU_LENGTH(data)	(((data) >> 8) & 0xffff)
#define SSIP_MSG_ID(data)	((data) & 0xff)
/* Generic Command */
#define SSIP_CMD(cmd, payload)	(((cmd) << 28) | ((payload) & 0xfffffff))
/* Commands for the control channel */
#define SSIP_BOOTINFO_REQ_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver))
#define SSIP_BOOTINFO_RESP_CMD(ver) \
		SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver))
#define SSIP_START_TRANS_CMD(pdulen, id) \
		SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id)))
#define SSIP_READY_CMD		SSIP_CMD(SSIP_READY, 0)
#define SSIP_SWBREAK_CMD	SSIP_CMD(SSIP_SW_BREAK, 0)
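
/*
 * Example encoding (illustrative values): a 7-byte PDU occupies
 * SSIP_BYTES_TO_FRAMES(7) == 2 32-bit frames, so a START_TRANS with
 * message id 3 is SSIP_START_TRANS_CMD(2, 3) == 0x40000203:
 * bits 31..28 carry the command, bits 23..8 the frame count and
 * bits 7..0 the message id.
 */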

/* Main state machine states */
enum {
	INIT,
	HANDSHAKE,
	ACTIVE,
};

/* Send state machine states */
enum {
	SEND_IDLE,
	WAIT4READY,
	SEND_READY,
	SENDING,
	SENDING_SWBREAK,
};

/* Receive state machine states */
enum {
	RECV_IDLE,
	RECV_READY,
	RECEIVING,
};

/**
 * struct ssi_protocol - SSI protocol (McSAAB) data
 * @main_state: Main state machine
 * @send_state: TX state machine
 * @recv_state: RX state machine
 * @waketest: Flag to follow wake line test
 * @rxid: RX data id
 * @txid: TX data id
 * @txqueue_len: TX queue length
 * @tx_wd: TX watchdog
 * @rx_wd: RX watchdog
 * @keep_alive: Workaround for SSI HW bug
 * @lock: To serialize access to this struct
 * @netdev: Phonet network device
 * @txqueue: TX data queue
 * @cmdqueue: Queue of free commands
 * @cl: HSI client own reference
 * @link: Link for ssip_list
 * @tx_usecnt: Refcount to keep track of the slaves that use the wake line
 * @channel_id_cmd: HSI channel id for command stream
 * @channel_id_data: HSI channel id for data stream
 */
struct ssi_protocol {
	unsigned int		main_state;
	unsigned int		send_state;
	unsigned int		recv_state;
	unsigned int		waketest:1;
	u8			rxid;
	u8			txid;
	unsigned int		txqueue_len;
	struct timer_list	tx_wd;
	struct timer_list	rx_wd;
	struct timer_list	keep_alive; /* wake-up workaround */
	spinlock_t		lock;
	struct net_device	*netdev;
	struct list_head	txqueue;
	struct list_head	cmdqueue;
	struct hsi_client	*cl;
	struct list_head	link;
	atomic_t		tx_usecnt;
	int			channel_id_cmd;
	int			channel_id_data;
};

/* List of ssi protocol instances */
static LIST_HEAD(ssip_list);

static void ssip_rxcmd_complete(struct hsi_msg *msg);

static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 ssip_get_cmd(struct hsi_msg *msg)
{
	u32 *data;

	data = sg_virt(msg->sgt.sgl);

	return *data;
}

static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
{
	skb_frag_t *frag;
	struct scatterlist *sg;
	int i;

	BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1));

	sg = msg->sgt.sgl;
	sg_set_buf(sg, skb->data, skb_headlen(skb));
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		sg = sg_next(sg);
		BUG_ON(!sg);
		frag = &skb_shinfo(skb)->frags[i];
		sg_set_page(sg, frag->page.p, frag->size, frag->page_offset);
	}
}

static void ssip_free_data(struct hsi_msg *msg)
{
	struct sk_buff *skb;

	skb = msg->context;
	pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context,
								skb);
	msg->destructor = NULL;
	dev_kfree_skb(skb);
	hsi_free_msg(msg);
}

static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi,
					struct sk_buff *skb, gfp_t flags)
{
	struct hsi_msg *msg;

	msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags);
	if (!msg)
		return NULL;
	ssip_skb_to_msg(skb, msg);
	msg->destructor = ssip_free_data;
	msg->channel = ssi->channel_id_data;
	msg->context = skb;

	return msg;
}

static inline void ssip_release_cmd(struct hsi_msg *msg)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl);

	dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg));
	spin_lock_bh(&ssi->lock);
	list_add_tail(&msg->link, &ssi->cmdqueue);
	spin_unlock_bh(&ssi->lock);
}
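
/*
 * Command messages are claimed from and returned to the pre-allocated
 * cmdqueue pool (SSIP_MAX_CMDS entries). ssip_release_cmd() doubles as
 * the message destructor, so flushed commands also land back in the pool.
 */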

static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	spin_lock_bh(&ssi->lock);
	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	spin_unlock_bh(&ssi->lock);
	msg->destructor = ssip_release_cmd;

	return msg;
}

static void ssip_free_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

static int ssip_alloc_cmds(struct ssi_protocol *ssi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	for (i = 0; i < SSIP_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = ssi->channel_id_cmd;
		list_add_tail(&msg->link, &ssi->cmdqueue);
	}

	return 0;
out:
	ssip_free_cmds(ssi);

	return -ENOMEM;
}

static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->recv_state = state;
	switch (state) {
	case RECV_IDLE:
		del_timer(&ssi->rx_wd);
		if (ssi->send_state == SEND_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case RECV_READY:
		/* CMT speech workaround */
		if (atomic_read(&ssi->tx_usecnt))
			break;
		/* Otherwise fall through */
	case RECEIVING:
		mod_timer(&ssi->keep_alive, jiffies +
						msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}

static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state)
{
	ssi->send_state = state;
	switch (state) {
	case SEND_IDLE:
	case SEND_READY:
		del_timer(&ssi->tx_wd);
		if (ssi->recv_state == RECV_IDLE)
			del_timer(&ssi->keep_alive);
		break;
	case WAIT4READY:
	case SENDING:
	case SENDING_SWBREAK:
		mod_timer(&ssi->keep_alive,
				jiffies + msecs_to_jiffies(SSIP_KATOUT));
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		break;
	default:
		break;
	}
}
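
/*
 * Slave clients sharing the same HSI port (e.g. the cmt-speech driver)
 * look up this client with ssip_slave_get_master() and bracket their
 * transfers with ssip_slave_start_tx()/ssip_slave_stop_tx() so that the
 * wake line stays up while any of them is transmitting.
 */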

struct hsi_client *ssip_slave_get_master(struct hsi_client *slave)
{
	struct hsi_client *master = ERR_PTR(-ENODEV);
	struct ssi_protocol *ssi;

	list_for_each_entry(ssi, &ssip_list, link)
		if (slave->device.parent == ssi->cl->device.parent) {
			master = ssi->cl;
			break;
		}

	return master;
}
EXPORT_SYMBOL_GPL(ssip_slave_get_master);

int ssip_slave_start_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt));
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		hsi_start_tx(master);
	}
	spin_unlock_bh(&ssi->lock);
	atomic_inc(&ssi->tx_usecnt);

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_start_tx);

int ssip_slave_stop_tx(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);

	WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0);

	if (atomic_dec_and_test(&ssi->tx_usecnt)) {
		spin_lock_bh(&ssi->lock);
		if ((ssi->send_state == SEND_READY) ||
			(ssi->send_state == WAIT4READY)) {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(master);
		}
		spin_unlock_bh(&ssi->lock);
	}
	dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt));

	return 0;
}
EXPORT_SYMBOL_GPL(ssip_slave_stop_tx);

int ssip_slave_running(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	return netif_running(ssi->netdev);
}
EXPORT_SYMBOL_GPL(ssip_slave_running);

static void ssip_reset(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	if (netif_running(ssi->netdev))
		netif_carrier_off(ssi->netdev);
	hsi_flush(cl);
	spin_lock_bh(&ssi->lock);
	if (ssi->send_state != SEND_IDLE)
		hsi_stop_tx(cl);
	if (ssi->waketest)
		ssi_waketest(cl, 0);
	del_timer(&ssi->rx_wd);
	del_timer(&ssi->tx_wd);
	del_timer(&ssi->keep_alive);
	ssi->main_state = 0;
	ssi->send_state = 0;
	ssi->recv_state = 0;
	ssi->waketest = 0;
	ssi->rxid = 0;
	ssi->txid = 0;
	list_for_each_safe(head, tmp, &ssi->txqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		dev_dbg(&cl->device, "Pending TX data\n");
		list_del(head);
		ssip_free_data(msg);
	}
	ssi->txqueue_len = 0;
	spin_unlock_bh(&ssi->lock);
}

static void ssip_dump_state(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	spin_lock_bh(&ssi->lock);
	dev_err(&cl->device, "Main state: %d\n", ssi->main_state);
	dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state);
	dev_err(&cl->device, "Send state: %d\n", ssi->send_state);
	dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ?
							"Online" : "Offline");
	dev_err(&cl->device, "Wake test %d\n", ssi->waketest);
	dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid);
	dev_err(&cl->device, "Data TX id: %d\n", ssi->txid);

	list_for_each_entry(msg, &ssi->txqueue, link)
		dev_err(&cl->device, "pending TX data (%p)\n", msg);
	spin_unlock_bh(&ssi->lock);
}

static void ssip_error(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	ssip_dump_state(cl);
	ssip_reset(cl);
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}
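
/*
 * The keep-alive timer re-arms itself roughly every SSIP_KATOUT msecs
 * while a transfer is in flight and works around an SSI HW wake-up issue
 * (see @keep_alive in struct ssi_protocol).
 */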

static void ssip_keep_alive(unsigned long data)
{
	struct hsi_client *cl = (struct hsi_client *)data;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n",
		ssi->main_state, ssi->recv_state, ssi->send_state);

	spin_lock(&ssi->lock);
	if (ssi->recv_state == RECV_IDLE)
		switch (ssi->send_state) {
		case SEND_READY:
			if (atomic_read(&ssi->tx_usecnt) == 0)
				break;
			/*
			 * Fall through. Workaround for cmt-speech;
			 * in that case we rely on audio timers.
			 */
		case SEND_IDLE:
			spin_unlock(&ssi->lock);
			return;
		}
	mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT));
	spin_unlock(&ssi->lock);
}

static void ssip_wd(unsigned long data)
{
	struct hsi_client *cl = (struct hsi_client *)data;

	dev_err(&cl->device, "Watchdog triggered\n");
	ssip_error(cl);
}

static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n");
	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID));
	msg->complete = ssip_release_cmd;
	hsi_async_write(cl, msg);
	dev_dbg(&cl->device, "Issuing RX command\n");
	msg = ssip_claim_cmd(ssi);
	msg->complete = ssip_rxcmd_complete;
	hsi_async_read(cl, msg);
}

static void ssip_start_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state,
						ssi->recv_state);
	spin_lock(&ssi->lock);
	/*
	 * We can have two UP events in a row due to a short low-high
	 * transition. Therefore we need to ignore the second UP event.
	 */
	if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
		spin_unlock(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECV_READY);
	spin_unlock(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	ssip_set_cmd(msg, SSIP_READY_CMD);
	msg->complete = ssip_release_cmd;
	dev_dbg(&cl->device, "Send READY\n");
	hsi_async_write(cl, msg);
}

static void ssip_stop_rx(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state);
	spin_lock(&ssi->lock);
	if (likely(ssi->main_state == ACTIVE))
		ssip_set_rxstate(ssi, RECV_IDLE);
	spin_unlock(&ssi->lock);
}
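
/*
 * TX path: ssip_pn_xmit() queues Phonet skbs, ssip_xmit() claims a
 * command and sends the START_TRANS header, its completion
 * (ssip_strans_complete()) starts the data write, and
 * ssip_tx_data_complete() finally sends a SW break or the next
 * queued transfer.
 */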

static void ssip_free_strans(struct hsi_msg *msg)
{
	ssip_free_data(msg->context);
	ssip_release_cmd(msg);
}

static void ssip_strans_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *data;

	data = msg->context;
	ssip_release_cmd(msg);
	spin_lock(&ssi->lock);
	ssip_set_txstate(ssi, SENDING);
	spin_unlock(&ssi->lock);
	hsi_async_write(cl, data);
}

static int ssip_xmit(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg, *dmsg;
	struct sk_buff *skb;

	spin_lock_bh(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		spin_unlock_bh(&ssi->lock);
		return 0;
	}
	dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link);
	list_del(&dmsg->link);
	ssi->txqueue_len--;
	spin_unlock_bh(&ssi->lock);

	msg = ssip_claim_cmd(ssi);
	skb = dmsg->context;
	msg->context = dmsg;
	msg->complete = ssip_strans_complete;
	msg->destructor = ssip_free_strans;

	spin_lock_bh(&ssi->lock);
	ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len),
							ssi->txid));
	ssi->txid++;
	ssip_set_txstate(ssi, SENDING);
	spin_unlock_bh(&ssi->lock);

	dev_dbg(&cl->device, "Send STRANS (%d frames)\n",
				SSIP_BYTES_TO_FRAMES(skb->len));

	return hsi_async_write(cl, msg);
}

/* In soft IRQ context */
static void ssip_pn_rx(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	if (unlikely(!netif_running(dev))) {
		dev_dbg(&dev->dev, "Drop RX packet\n");
		dev->stats.rx_dropped++;
		dev_kfree_skb(skb);
		return;
	}
	if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) {
		dev_dbg(&dev->dev, "Error drop RX packet\n");
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
		dev_kfree_skb(skb);
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]);
	dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n",
			((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2]));

	skb->protocol = htons(ETH_P_PHONET);
	skb_reset_mac_header(skb);
	__skb_pull(skb, 1);
	netif_rx(skb);
}

static void ssip_rx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX data error\n");
		ssip_free_data(msg);
		ssip_error(cl);
		return;
	}
	del_timer(&ssi->rx_wd); /* FIXME: Revisit */
	skb = msg->context;
	ssip_pn_rx(skb);
	hsi_free_msg(msg);
}

static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	/* Workaround: Ignore CMT Loader message leftover */
	if (cmd == SSIP_CMT_LOADER_SYNC)
		return;

	switch (ssi->main_state) {
	case ACTIVE:
		dev_err(&cl->device, "Boot info req on active state\n");
		ssip_error(cl);
		/* Fall through */
	case INIT:
	case HANDSHAKE:
		spin_lock(&ssi->lock);
		ssi->main_state = HANDSHAKE;
		if (!ssi->waketest) {
			ssi->waketest = 1;
			ssi_waketest(cl, 1); /* FIXME: To be removed */
		}
		/* Start boot handshake watchdog */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
		spin_unlock(&ssi->lock);
		dev_dbg(&cl->device, "Send BOOTINFO_RESP\n");
		if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
			dev_warn(&cl->device, "boot info req verid mismatch\n");
		msg = ssip_claim_cmd(ssi);
		ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID));
		msg->complete = ssip_release_cmd;
		hsi_async_write(cl, msg);
		break;
	default:
		dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
		break;
	}
}

static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID)
		dev_warn(&cl->device, "boot info resp verid mismatch\n");

	spin_lock(&ssi->lock);
	if (ssi->main_state != ACTIVE)
		/* Use tx_wd as a boot watchdog in non ACTIVE state */
		mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT));
	else
		dev_dbg(&cl->device, "boot info resp ignored M(%d)\n",
							ssi->main_state);
	spin_unlock(&ssi->lock);
}
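
/*
 * The modem reports the wake line test result once the BOOTINFO
 * handshake has completed. A successful result moves the protocol to
 * ACTIVE and brings the carrier up; a failure resets the link.
 */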

static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	unsigned int wkres = SSIP_PAYLOAD(cmd);

	spin_lock(&ssi->lock);
	if (ssi->main_state != HANDSHAKE) {
		dev_dbg(&cl->device, "wake lines test ignored M(%d)\n",
							ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	if (ssi->waketest) {
		ssi->waketest = 0;
		ssi_waketest(cl, 0); /* FIXME: To be removed */
	}
	ssi->main_state = ACTIVE;
	del_timer(&ssi->tx_wd); /* Stop boot handshake timer */
	spin_unlock(&ssi->lock);

	dev_notice(&cl->device, "WAKELINES TEST %s\n",
				wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK");
	if (wkres & SSIP_WAKETEST_FAILED) {
		ssip_error(cl);
		return;
	}
	dev_dbg(&cl->device, "CMT is ONLINE\n");
	netif_wake_queue(ssi->netdev);
	netif_carrier_on(ssi->netdev);
}

static void ssip_rx_ready(struct hsi_client *cl)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	spin_lock(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	if (ssi->send_state != WAIT4READY) {
		dev_dbg(&cl->device, "Ignore spurious READY command\n");
		spin_unlock(&ssi->lock);
		return;
	}
	ssip_set_txstate(ssi, SEND_READY);
	spin_unlock(&ssi->lock);
	ssip_xmit(cl);
}

static void ssip_rx_strans(struct hsi_client *cl, u32 cmd)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct sk_buff *skb;
	struct hsi_msg *msg;
	int len = SSIP_PDU_LENGTH(cmd);

	dev_dbg(&cl->device, "RX strans: %d frames\n", len);
	spin_lock(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n",
					ssi->send_state, ssi->main_state);
		spin_unlock(&ssi->lock);
		return;
	}
	ssip_set_rxstate(ssi, RECEIVING);
	if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) {
		dev_err(&cl->device, "START TRANS id %d expected %d\n",
					SSIP_MSG_ID(cmd), ssi->rxid);
		spin_unlock(&ssi->lock);
		goto out1;
	}
	ssi->rxid++;
	spin_unlock(&ssi->lock);
	skb = netdev_alloc_skb(ssi->netdev, len * 4);
	if (unlikely(!skb)) {
		dev_err(&cl->device, "No memory for rx skb\n");
		goto out1;
	}
	skb->dev = ssi->netdev;
	skb_put(skb, len * 4);
	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (unlikely(!msg)) {
		dev_err(&cl->device, "No memory for RX data msg\n");
		goto out2;
	}
	msg->complete = ssip_rx_data_complete;
	hsi_async_read(cl, msg);

	return;
out2:
	dev_kfree_skb(skb);
out1:
	ssip_error(cl);
}

static void ssip_rxcmd_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	u32 cmd = ssip_get_cmd(msg);
	unsigned int cmdid = SSIP_COMMAND(cmd);

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "RX error detected\n");
		ssip_release_cmd(msg);
		ssip_error(cl);
		return;
	}
	hsi_async_read(cl, msg);
	dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd);
	switch (cmdid) {
	case SSIP_SW_BREAK:
		/* Ignored */
		break;
	case SSIP_BOOTINFO_REQ:
		ssip_rx_bootinforeq(cl, cmd);
		break;
	case SSIP_BOOTINFO_RESP:
		ssip_rx_bootinforesp(cl, cmd);
		break;
	case SSIP_WAKETEST_RESULT:
		ssip_rx_waketest(cl, cmd);
		break;
	case SSIP_START_TRANS:
		ssip_rx_strans(cl, cmd);
		break;
	case SSIP_READY:
		ssip_rx_ready(cl);
		break;
	default:
		dev_warn(&cl->device, "command 0x%08x not supported\n", cmd);
		break;
	}
}
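
/*
 * Once the SW break completes, the TX side either goes idle (releasing
 * the wake line), stays in SEND_READY for slave users, or starts the
 * next queued transfer; the netif queue is woken up in any case.
 */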

static void ssip_swbreak_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	ssip_release_cmd(msg);
	spin_lock(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		if (atomic_read(&ssi->tx_usecnt)) {
			ssip_set_txstate(ssi, SEND_READY);
		} else {
			ssip_set_txstate(ssi, SEND_IDLE);
			hsi_stop_tx(cl);
		}
		spin_unlock(&ssi->lock);
	} else {
		spin_unlock(&ssi->lock);
		ssip_xmit(cl);
	}
	netif_wake_queue(ssi->netdev);
}

static void ssip_tx_data_complete(struct hsi_msg *msg)
{
	struct hsi_client *cl = msg->cl;
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *cmsg;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&cl->device, "TX data error\n");
		ssip_error(cl);
		goto out;
	}
	spin_lock(&ssi->lock);
	if (list_empty(&ssi->txqueue)) {
		ssip_set_txstate(ssi, SENDING_SWBREAK);
		spin_unlock(&ssi->lock);
		cmsg = ssip_claim_cmd(ssi);
		ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD);
		cmsg->complete = ssip_swbreak_complete;
		dev_dbg(&cl->device, "Send SWBREAK\n");
		hsi_async_write(cl, cmsg);
	} else {
		spin_unlock(&ssi->lock);
		ssip_xmit(cl);
	}
out:
	ssip_free_data(msg);
}

static void ssip_port_event(struct hsi_client *cl, unsigned long event)
{
	switch (event) {
	case HSI_EVENT_START_RX:
		ssip_start_rx(cl);
		break;
	case HSI_EVENT_STOP_RX:
		ssip_stop_rx(cl);
		break;
	default:
		return;
	}
}

static int ssip_pn_open(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	int err;

	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device, "SSI port already claimed\n");
		return err;
	}
	err = hsi_register_port_event(cl, ssip_port_event);
	if (err < 0) {
		dev_err(&cl->device, "Register HSI port event failed (%d)\n",
			err);
		return err;
	}
	dev_dbg(&cl->device, "Configuring SSI port\n");
	hsi_setup(cl);
	spin_lock_bh(&ssi->lock);
	if (!ssi->waketest) {
		ssi->waketest = 1;
		ssi_waketest(cl, 1); /* FIXME: To be removed */
	}
	ssi->main_state = HANDSHAKE;
	spin_unlock_bh(&ssi->lock);

	ssip_send_bootinfo_req_cmd(cl);

	return 0;
}

static int ssip_pn_stop(struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);

	ssip_reset(cl);
	hsi_unregister_port_event(cl);
	hsi_release_port(cl);

	return 0;
}

static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu > SSIP_MAX_MTU || new_mtu < PHONET_MIN_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;

	return 0;
}
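
/*
 * Phonet TX entry point: frames are padded to a 32-bit boundary and the
 * Phonet length field is converted to the byte order expected by the
 * modem before the skb is queued and transmission is kicked off
 * according to the current send state.
 */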

static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev->dev.parent);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);
	struct hsi_msg *msg;

	if ((skb->protocol != htons(ETH_P_PHONET)) ||
					(skb->len < SSIP_MIN_PN_HDR))
		goto drop;
	/* Pad to 32-bits - FIXME: Revisit */
	if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3)))
		goto drop;

	/*
	 * Modem sends Phonet messages over SSI with its own endianness.
	 * Assume that modem has the same endianness as we do.
	 */
	if (skb_cow_head(skb, 0))
		goto drop;

	/* length field is exchanged in network byte order */
	((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]);

	msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC);
	if (!msg) {
		dev_dbg(&cl->device, "Dropping tx data: No memory\n");
		goto drop;
	}
	msg->complete = ssip_tx_data_complete;

	spin_lock_bh(&ssi->lock);
	if (unlikely(ssi->main_state != ACTIVE)) {
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n");
		goto drop2;
	}
	list_add_tail(&msg->link, &ssi->txqueue);
	ssi->txqueue_len++;
	if (dev->tx_queue_len < ssi->txqueue_len) {
		dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len);
		netif_stop_queue(dev);
	}
	if (ssi->send_state == SEND_IDLE) {
		ssip_set_txstate(ssi, WAIT4READY);
		spin_unlock_bh(&ssi->lock);
		dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len);
		hsi_start_tx(cl);
	} else if (ssi->send_state == SEND_READY) {
		/* Needed for cmt-speech workaround */
		dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n",
							ssi->txqueue_len);
		spin_unlock_bh(&ssi->lock);
		ssip_xmit(cl);
	} else {
		spin_unlock_bh(&ssi->lock);
	}
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	return 0;
drop2:
	hsi_free_msg(msg);
drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);

	return 0;
}

/* CMT reset event handler */
void ssip_reset_event(struct hsi_client *master)
{
	struct ssi_protocol *ssi = hsi_client_drvdata(master);
	dev_err(&ssi->cl->device, "CMT reset detected!\n");
	ssip_error(ssi->cl);
}
EXPORT_SYMBOL_GPL(ssip_reset_event);

static const struct net_device_ops ssip_pn_ops = {
	.ndo_open	= ssip_pn_open,
	.ndo_stop	= ssip_pn_stop,
	.ndo_start_xmit	= ssip_pn_xmit,
	.ndo_change_mtu	= ssip_pn_set_mtu,
};

static void ssip_pn_setup(struct net_device *dev)
{
	dev->features		= 0;
	dev->netdev_ops		= &ssip_pn_ops;
	dev->type		= ARPHRD_PHONET;
	dev->flags		= IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu		= SSIP_DEFAULT_MTU;
	dev->hard_header_len	= 1;
	dev->dev_addr[0]	= PN_MEDIA_SOS;
	dev->addr_len		= 1;
	dev->tx_queue_len	= SSIP_TXQUEUE_LEN;

	dev->destructor		= free_netdev;
	dev->header_ops		= &phonet_header_ops;
}

static int ssi_protocol_probe(struct device *dev)
{
	static const char ifname[] = "phonet%d";
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi;
	int err;

	ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
	if (!ssi) {
		dev_err(dev, "No memory for ssi protocol\n");
		return -ENOMEM;
	}

	spin_lock_init(&ssi->lock);
	init_timer_deferrable(&ssi->rx_wd);
	init_timer_deferrable(&ssi->tx_wd);
	init_timer(&ssi->keep_alive);
	ssi->rx_wd.data = (unsigned long)cl;
	ssi->rx_wd.function = ssip_wd;
	ssi->tx_wd.data = (unsigned long)cl;
	ssi->tx_wd.function = ssip_wd;
	ssi->keep_alive.data = (unsigned long)cl;
	ssi->keep_alive.function = ssip_keep_alive;
	INIT_LIST_HEAD(&ssi->txqueue);
	INIT_LIST_HEAD(&ssi->cmdqueue);
	atomic_set(&ssi->tx_usecnt, 0);
	hsi_client_set_drvdata(cl, ssi);
	ssi->cl = cl;

	ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control");
	if (ssi->channel_id_cmd < 0) {
		err = ssi->channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		goto out;
	}

	ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data");
	if (ssi->channel_id_data < 0) {
		err = ssi->channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		goto out;
	}

	err = ssip_alloc_cmds(ssi);
	if (err < 0) {
		dev_err(dev, "No memory for commands\n");
		goto out;
	}

	ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
	if (!ssi->netdev) {
		dev_err(dev, "No memory for netdev\n");
		err = -ENOMEM;
		goto out1;
	}

	SET_NETDEV_DEV(ssi->netdev, dev);
	netif_carrier_off(ssi->netdev);
	err = register_netdev(ssi->netdev);
	if (err < 0) {
		dev_err(dev, "Register netdev failed (%d)\n", err);
		goto out2;
	}

	list_add(&ssi->link, &ssip_list);

	dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n",
		ssi->channel_id_cmd, ssi->channel_id_data);

	return 0;
out2:
	free_netdev(ssi->netdev);
out1:
	ssip_free_cmds(ssi);
out:
	kfree(ssi);

	return err;
}

static int ssi_protocol_remove(struct device *dev)
{
	struct hsi_client *cl = to_hsi_client(dev);
	struct ssi_protocol *ssi = hsi_client_drvdata(cl);

	list_del(&ssi->link);
	unregister_netdev(ssi->netdev);
	ssip_free_cmds(ssi);
	hsi_client_set_drvdata(cl, NULL);
	kfree(ssi);

	return 0;
}

static struct hsi_client_driver ssip_driver = {
	.driver = {
		.name	= "ssi-protocol",
		.owner	= THIS_MODULE,
		.probe	= ssi_protocol_probe,
		.remove	= ssi_protocol_remove,
	},
};

static int __init ssip_init(void)
{
	pr_info("SSI protocol aka McSAAB added\n");

	return hsi_register_client_driver(&ssip_driver);
}
module_init(ssip_init);

static void __exit ssip_exit(void)
{
	hsi_unregister_client_driver(&ssip_driver);
	pr_info("SSI protocol driver removed\n");
}
module_exit(ssip_exit);

MODULE_ALIAS("hsi:ssi-protocol");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>");
MODULE_DESCRIPTION("SSI protocol improved aka McSAAB");
MODULE_LICENSE("GPL");