1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * ssi_protocol.c 4 * 5 * Implementation of the SSI McSAAB improved protocol. 6 * 7 * Copyright (C) 2010 Nokia Corporation. All rights reserved. 8 * Copyright (C) 2013 Sebastian Reichel <sre@kernel.org> 9 * 10 * Contact: Carlos Chinea <carlos.chinea@nokia.com> 11 */ 12 13 #include <linux/atomic.h> 14 #include <linux/clk.h> 15 #include <linux/device.h> 16 #include <linux/err.h> 17 #include <linux/gpio.h> 18 #include <linux/if_ether.h> 19 #include <linux/if_arp.h> 20 #include <linux/if_phonet.h> 21 #include <linux/init.h> 22 #include <linux/irq.h> 23 #include <linux/list.h> 24 #include <linux/module.h> 25 #include <linux/netdevice.h> 26 #include <linux/notifier.h> 27 #include <linux/scatterlist.h> 28 #include <linux/skbuff.h> 29 #include <linux/slab.h> 30 #include <linux/spinlock.h> 31 #include <linux/timer.h> 32 #include <linux/hsi/hsi.h> 33 #include <linux/hsi/ssi_protocol.h> 34 35 void ssi_waketest(struct hsi_client *cl, unsigned int enable); 36 37 #define SSIP_TXQUEUE_LEN 100 38 #define SSIP_MAX_MTU 65535 39 #define SSIP_DEFAULT_MTU 4000 40 #define PN_MEDIA_SOS 21 41 #define SSIP_MIN_PN_HDR 6 /* FIXME: Revisit */ 42 #define SSIP_WDTOUT 2000 /* FIXME: has to be 500 msecs */ 43 #define SSIP_KATOUT 15 /* 15 msecs */ 44 #define SSIP_MAX_CMDS 5 /* Number of pre-allocated commands buffers */ 45 #define SSIP_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) 46 #define SSIP_CMT_LOADER_SYNC 0x11223344 47 /* 48 * SSI protocol command definitions 49 */ 50 #define SSIP_COMMAND(data) ((data) >> 28) 51 #define SSIP_PAYLOAD(data) ((data) & 0xfffffff) 52 /* Commands */ 53 #define SSIP_SW_BREAK 0 54 #define SSIP_BOOTINFO_REQ 1 55 #define SSIP_BOOTINFO_RESP 2 56 #define SSIP_WAKETEST_RESULT 3 57 #define SSIP_START_TRANS 4 58 #define SSIP_READY 5 59 /* Payloads */ 60 #define SSIP_DATA_VERSION(data) ((data) & 0xff) 61 #define SSIP_LOCAL_VERID 1 62 #define SSIP_WAKETEST_OK 0 63 #define SSIP_WAKETEST_FAILED 1 64 #define SSIP_PDU_LENGTH(data) (((data) >> 8) & 0xffff) 65 #define SSIP_MSG_ID(data) ((data) & 0xff) 66 /* Generic Command */ 67 #define SSIP_CMD(cmd, payload) (((cmd) << 28) | ((payload) & 0xfffffff)) 68 /* Commands for the control channel */ 69 #define SSIP_BOOTINFO_REQ_CMD(ver) \ 70 SSIP_CMD(SSIP_BOOTINFO_REQ, SSIP_DATA_VERSION(ver)) 71 #define SSIP_BOOTINFO_RESP_CMD(ver) \ 72 SSIP_CMD(SSIP_BOOTINFO_RESP, SSIP_DATA_VERSION(ver)) 73 #define SSIP_START_TRANS_CMD(pdulen, id) \ 74 SSIP_CMD(SSIP_START_TRANS, (((pdulen) << 8) | SSIP_MSG_ID(id))) 75 #define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0) 76 #define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0) 77 78 #define SSIP_WAKETEST_FLAG 0 79 80 /* Main state machine states */ 81 enum { 82 INIT, 83 HANDSHAKE, 84 ACTIVE, 85 }; 86 87 /* Send state machine states */ 88 enum { 89 SEND_IDLE, 90 WAIT4READY, 91 SEND_READY, 92 SENDING, 93 SENDING_SWBREAK, 94 }; 95 96 /* Receive state machine states */ 97 enum { 98 RECV_IDLE, 99 RECV_READY, 100 RECEIVING, 101 }; 102 103 /** 104 * struct ssi_protocol - SSI protocol (McSAAB) data 105 * @main_state: Main state machine 106 * @send_state: TX state machine 107 * @recv_state: RX state machine 108 * @flags: Flags, currently only used to follow wake line test 109 * @rxid: RX data id 110 * @txid: TX data id 111 * @txqueue_len: TX queue length 112 * @tx_wd: TX watchdog 113 * @rx_wd: RX watchdog 114 * @keep_alive: Workaround for SSI HW bug 115 * @lock: To serialize access to this struct 116 * @netdev: Phonet network device 117 * @txqueue: TX data queue 118 * @cmdqueue: Queue of free commands 119 * @cl: HSI client own reference 120 * @link: Link for ssip_list 121 * @tx_usecount: Refcount to keep track the slaves that use the wake line 122 * @channel_id_cmd: HSI channel id for command stream 123 * @channel_id_data: HSI channel id for data stream 124 */ 125 struct ssi_protocol { 126 unsigned int main_state; 127 unsigned int send_state; 128 unsigned int recv_state; 129 unsigned long flags; 130 u8 rxid; 131 u8 txid; 132 unsigned int txqueue_len; 133 struct timer_list tx_wd; 134 struct timer_list rx_wd; 135 struct timer_list keep_alive; /* wake-up workaround */ 136 spinlock_t lock; 137 struct net_device *netdev; 138 struct list_head txqueue; 139 struct list_head cmdqueue; 140 struct work_struct work; 141 struct hsi_client *cl; 142 struct list_head link; 143 atomic_t tx_usecnt; 144 int channel_id_cmd; 145 int channel_id_data; 146 }; 147 148 /* List of ssi protocol instances */ 149 static LIST_HEAD(ssip_list); 150 151 static void ssip_rxcmd_complete(struct hsi_msg *msg); 152 153 static inline void ssip_set_cmd(struct hsi_msg *msg, u32 cmd) 154 { 155 u32 *data; 156 157 data = sg_virt(msg->sgt.sgl); 158 *data = cmd; 159 } 160 161 static inline u32 ssip_get_cmd(struct hsi_msg *msg) 162 { 163 u32 *data; 164 165 data = sg_virt(msg->sgt.sgl); 166 167 return *data; 168 } 169 170 static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg) 171 { 172 skb_frag_t *frag; 173 struct scatterlist *sg; 174 int i; 175 176 BUG_ON(msg->sgt.nents != (unsigned int)(skb_shinfo(skb)->nr_frags + 1)); 177 178 sg = msg->sgt.sgl; 179 sg_set_buf(sg, skb->data, skb_headlen(skb)); 180 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 181 sg = sg_next(sg); 182 BUG_ON(!sg); 183 frag = &skb_shinfo(skb)->frags[i]; 184 sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag), 185 skb_frag_off(frag)); 186 } 187 } 188 189 static void ssip_free_data(struct hsi_msg *msg) 190 { 191 struct sk_buff *skb; 192 193 skb = msg->context; 194 pr_debug("free data: msg %p context %p skb %p\n", msg, msg->context, 195 skb); 196 msg->destructor = NULL; 197 dev_kfree_skb(skb); 198 hsi_free_msg(msg); 199 } 200 201 static struct hsi_msg *ssip_alloc_data(struct ssi_protocol *ssi, 202 struct sk_buff *skb, gfp_t flags) 203 { 204 struct hsi_msg *msg; 205 206 msg = hsi_alloc_msg(skb_shinfo(skb)->nr_frags + 1, flags); 207 if (!msg) 208 return NULL; 209 ssip_skb_to_msg(skb, msg); 210 msg->destructor = ssip_free_data; 211 msg->channel = ssi->channel_id_data; 212 msg->context = skb; 213 214 return msg; 215 } 216 217 static inline void ssip_release_cmd(struct hsi_msg *msg) 218 { 219 struct ssi_protocol *ssi = hsi_client_drvdata(msg->cl); 220 221 dev_dbg(&msg->cl->device, "Release cmd 0x%08x\n", ssip_get_cmd(msg)); 222 spin_lock_bh(&ssi->lock); 223 list_add_tail(&msg->link, &ssi->cmdqueue); 224 spin_unlock_bh(&ssi->lock); 225 } 226 227 static struct hsi_msg *ssip_claim_cmd(struct ssi_protocol *ssi) 228 { 229 struct hsi_msg *msg; 230 231 BUG_ON(list_empty(&ssi->cmdqueue)); 232 233 spin_lock_bh(&ssi->lock); 234 msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link); 235 list_del(&msg->link); 236 spin_unlock_bh(&ssi->lock); 237 msg->destructor = ssip_release_cmd; 238 239 return msg; 240 } 241 242 static void ssip_free_cmds(struct ssi_protocol *ssi) 243 { 244 struct hsi_msg *msg, *tmp; 245 246 list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) { 247 list_del(&msg->link); 248 msg->destructor = NULL; 249 kfree(sg_virt(msg->sgt.sgl)); 250 hsi_free_msg(msg); 251 } 252 } 253 254 static int ssip_alloc_cmds(struct ssi_protocol *ssi) 255 { 256 struct hsi_msg *msg; 257 u32 *buf; 258 unsigned int i; 259 260 for (i = 0; i < SSIP_MAX_CMDS; i++) { 261 msg = hsi_alloc_msg(1, GFP_KERNEL); 262 if (!msg) 263 goto out; 264 buf = kmalloc(sizeof(*buf), GFP_KERNEL); 265 if (!buf) { 266 hsi_free_msg(msg); 267 goto out; 268 } 269 sg_init_one(msg->sgt.sgl, buf, sizeof(*buf)); 270 msg->channel = ssi->channel_id_cmd; 271 list_add_tail(&msg->link, &ssi->cmdqueue); 272 } 273 274 return 0; 275 out: 276 ssip_free_cmds(ssi); 277 278 return -ENOMEM; 279 } 280 281 static void ssip_set_rxstate(struct ssi_protocol *ssi, unsigned int state) 282 { 283 ssi->recv_state = state; 284 switch (state) { 285 case RECV_IDLE: 286 del_timer(&ssi->rx_wd); 287 if (ssi->send_state == SEND_IDLE) 288 del_timer(&ssi->keep_alive); 289 break; 290 case RECV_READY: 291 /* CMT speech workaround */ 292 if (atomic_read(&ssi->tx_usecnt)) 293 break; 294 fallthrough; 295 case RECEIVING: 296 mod_timer(&ssi->keep_alive, jiffies + 297 msecs_to_jiffies(SSIP_KATOUT)); 298 mod_timer(&ssi->rx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); 299 break; 300 default: 301 break; 302 } 303 } 304 305 static void ssip_set_txstate(struct ssi_protocol *ssi, unsigned int state) 306 { 307 ssi->send_state = state; 308 switch (state) { 309 case SEND_IDLE: 310 case SEND_READY: 311 del_timer(&ssi->tx_wd); 312 if (ssi->recv_state == RECV_IDLE) 313 del_timer(&ssi->keep_alive); 314 break; 315 case WAIT4READY: 316 case SENDING: 317 case SENDING_SWBREAK: 318 mod_timer(&ssi->keep_alive, 319 jiffies + msecs_to_jiffies(SSIP_KATOUT)); 320 mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); 321 break; 322 default: 323 break; 324 } 325 } 326 327 struct hsi_client *ssip_slave_get_master(struct hsi_client *slave) 328 { 329 struct hsi_client *master = ERR_PTR(-ENODEV); 330 struct ssi_protocol *ssi; 331 332 list_for_each_entry(ssi, &ssip_list, link) 333 if (slave->device.parent == ssi->cl->device.parent) { 334 master = ssi->cl; 335 break; 336 } 337 338 return master; 339 } 340 EXPORT_SYMBOL_GPL(ssip_slave_get_master); 341 342 int ssip_slave_start_tx(struct hsi_client *master) 343 { 344 struct ssi_protocol *ssi = hsi_client_drvdata(master); 345 346 dev_dbg(&master->device, "start TX %d\n", atomic_read(&ssi->tx_usecnt)); 347 spin_lock_bh(&ssi->lock); 348 if (ssi->send_state == SEND_IDLE) { 349 ssip_set_txstate(ssi, WAIT4READY); 350 hsi_start_tx(master); 351 } 352 spin_unlock_bh(&ssi->lock); 353 atomic_inc(&ssi->tx_usecnt); 354 355 return 0; 356 } 357 EXPORT_SYMBOL_GPL(ssip_slave_start_tx); 358 359 int ssip_slave_stop_tx(struct hsi_client *master) 360 { 361 struct ssi_protocol *ssi = hsi_client_drvdata(master); 362 363 WARN_ON_ONCE(atomic_read(&ssi->tx_usecnt) == 0); 364 365 if (atomic_dec_and_test(&ssi->tx_usecnt)) { 366 spin_lock_bh(&ssi->lock); 367 if ((ssi->send_state == SEND_READY) || 368 (ssi->send_state == WAIT4READY)) { 369 ssip_set_txstate(ssi, SEND_IDLE); 370 hsi_stop_tx(master); 371 } 372 spin_unlock_bh(&ssi->lock); 373 } 374 dev_dbg(&master->device, "stop TX %d\n", atomic_read(&ssi->tx_usecnt)); 375 376 return 0; 377 } 378 EXPORT_SYMBOL_GPL(ssip_slave_stop_tx); 379 380 int ssip_slave_running(struct hsi_client *master) 381 { 382 struct ssi_protocol *ssi = hsi_client_drvdata(master); 383 return netif_running(ssi->netdev); 384 } 385 EXPORT_SYMBOL_GPL(ssip_slave_running); 386 387 static void ssip_reset(struct hsi_client *cl) 388 { 389 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 390 struct list_head *head, *tmp; 391 struct hsi_msg *msg; 392 393 if (netif_running(ssi->netdev)) 394 netif_carrier_off(ssi->netdev); 395 hsi_flush(cl); 396 spin_lock_bh(&ssi->lock); 397 if (ssi->send_state != SEND_IDLE) 398 hsi_stop_tx(cl); 399 spin_unlock_bh(&ssi->lock); 400 if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) 401 ssi_waketest(cl, 0); /* FIXME: To be removed */ 402 spin_lock_bh(&ssi->lock); 403 del_timer(&ssi->rx_wd); 404 del_timer(&ssi->tx_wd); 405 del_timer(&ssi->keep_alive); 406 ssi->main_state = 0; 407 ssi->send_state = 0; 408 ssi->recv_state = 0; 409 ssi->flags = 0; 410 ssi->rxid = 0; 411 ssi->txid = 0; 412 list_for_each_safe(head, tmp, &ssi->txqueue) { 413 msg = list_entry(head, struct hsi_msg, link); 414 dev_dbg(&cl->device, "Pending TX data\n"); 415 list_del(head); 416 ssip_free_data(msg); 417 } 418 ssi->txqueue_len = 0; 419 spin_unlock_bh(&ssi->lock); 420 } 421 422 static void ssip_dump_state(struct hsi_client *cl) 423 { 424 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 425 struct hsi_msg *msg; 426 427 spin_lock_bh(&ssi->lock); 428 dev_err(&cl->device, "Main state: %d\n", ssi->main_state); 429 dev_err(&cl->device, "Recv state: %d\n", ssi->recv_state); 430 dev_err(&cl->device, "Send state: %d\n", ssi->send_state); 431 dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? 432 "Online" : "Offline"); 433 dev_err(&cl->device, "Wake test %d\n", 434 test_bit(SSIP_WAKETEST_FLAG, &ssi->flags)); 435 dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); 436 dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); 437 438 list_for_each_entry(msg, &ssi->txqueue, link) 439 dev_err(&cl->device, "pending TX data (%p)\n", msg); 440 spin_unlock_bh(&ssi->lock); 441 } 442 443 static void ssip_error(struct hsi_client *cl) 444 { 445 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 446 struct hsi_msg *msg; 447 448 ssip_dump_state(cl); 449 ssip_reset(cl); 450 msg = ssip_claim_cmd(ssi); 451 msg->complete = ssip_rxcmd_complete; 452 hsi_async_read(cl, msg); 453 } 454 455 static void ssip_keep_alive(struct timer_list *t) 456 { 457 struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive); 458 struct hsi_client *cl = ssi->cl; 459 460 dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", 461 ssi->main_state, ssi->recv_state, ssi->send_state); 462 463 spin_lock(&ssi->lock); 464 if (ssi->recv_state == RECV_IDLE) 465 switch (ssi->send_state) { 466 case SEND_READY: 467 if (atomic_read(&ssi->tx_usecnt) == 0) 468 break; 469 fallthrough; 470 /* 471 * Workaround for cmt-speech in that case 472 * we relay on audio timers. 473 */ 474 case SEND_IDLE: 475 spin_unlock(&ssi->lock); 476 return; 477 } 478 mod_timer(&ssi->keep_alive, jiffies + msecs_to_jiffies(SSIP_KATOUT)); 479 spin_unlock(&ssi->lock); 480 } 481 482 static void ssip_rx_wd(struct timer_list *t) 483 { 484 struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd); 485 struct hsi_client *cl = ssi->cl; 486 487 dev_err(&cl->device, "Watchdog triggered\n"); 488 ssip_error(cl); 489 } 490 491 static void ssip_tx_wd(struct timer_list *t) 492 { 493 struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd); 494 struct hsi_client *cl = ssi->cl; 495 496 dev_err(&cl->device, "Watchdog triggered\n"); 497 ssip_error(cl); 498 } 499 500 static void ssip_send_bootinfo_req_cmd(struct hsi_client *cl) 501 { 502 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 503 struct hsi_msg *msg; 504 505 dev_dbg(&cl->device, "Issuing BOOT INFO REQ command\n"); 506 msg = ssip_claim_cmd(ssi); 507 ssip_set_cmd(msg, SSIP_BOOTINFO_REQ_CMD(SSIP_LOCAL_VERID)); 508 msg->complete = ssip_release_cmd; 509 hsi_async_write(cl, msg); 510 dev_dbg(&cl->device, "Issuing RX command\n"); 511 msg = ssip_claim_cmd(ssi); 512 msg->complete = ssip_rxcmd_complete; 513 hsi_async_read(cl, msg); 514 } 515 516 static void ssip_start_rx(struct hsi_client *cl) 517 { 518 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 519 struct hsi_msg *msg; 520 521 dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, 522 ssi->recv_state); 523 spin_lock_bh(&ssi->lock); 524 /* 525 * We can have two UP events in a row due to a short low 526 * high transition. Therefore we need to ignore the sencond UP event. 527 */ 528 if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { 529 spin_unlock_bh(&ssi->lock); 530 return; 531 } 532 ssip_set_rxstate(ssi, RECV_READY); 533 spin_unlock_bh(&ssi->lock); 534 535 msg = ssip_claim_cmd(ssi); 536 ssip_set_cmd(msg, SSIP_READY_CMD); 537 msg->complete = ssip_release_cmd; 538 dev_dbg(&cl->device, "Send READY\n"); 539 hsi_async_write(cl, msg); 540 } 541 542 static void ssip_stop_rx(struct hsi_client *cl) 543 { 544 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 545 546 dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); 547 spin_lock_bh(&ssi->lock); 548 if (likely(ssi->main_state == ACTIVE)) 549 ssip_set_rxstate(ssi, RECV_IDLE); 550 spin_unlock_bh(&ssi->lock); 551 } 552 553 static void ssip_free_strans(struct hsi_msg *msg) 554 { 555 ssip_free_data(msg->context); 556 ssip_release_cmd(msg); 557 } 558 559 static void ssip_strans_complete(struct hsi_msg *msg) 560 { 561 struct hsi_client *cl = msg->cl; 562 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 563 struct hsi_msg *data; 564 565 data = msg->context; 566 ssip_release_cmd(msg); 567 spin_lock_bh(&ssi->lock); 568 ssip_set_txstate(ssi, SENDING); 569 spin_unlock_bh(&ssi->lock); 570 hsi_async_write(cl, data); 571 } 572 573 static int ssip_xmit(struct hsi_client *cl) 574 { 575 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 576 struct hsi_msg *msg, *dmsg; 577 struct sk_buff *skb; 578 579 spin_lock_bh(&ssi->lock); 580 if (list_empty(&ssi->txqueue)) { 581 spin_unlock_bh(&ssi->lock); 582 return 0; 583 } 584 dmsg = list_first_entry(&ssi->txqueue, struct hsi_msg, link); 585 list_del(&dmsg->link); 586 ssi->txqueue_len--; 587 spin_unlock_bh(&ssi->lock); 588 589 msg = ssip_claim_cmd(ssi); 590 skb = dmsg->context; 591 msg->context = dmsg; 592 msg->complete = ssip_strans_complete; 593 msg->destructor = ssip_free_strans; 594 595 spin_lock_bh(&ssi->lock); 596 ssip_set_cmd(msg, SSIP_START_TRANS_CMD(SSIP_BYTES_TO_FRAMES(skb->len), 597 ssi->txid)); 598 ssi->txid++; 599 ssip_set_txstate(ssi, SENDING); 600 spin_unlock_bh(&ssi->lock); 601 602 dev_dbg(&cl->device, "Send STRANS (%d frames)\n", 603 SSIP_BYTES_TO_FRAMES(skb->len)); 604 605 return hsi_async_write(cl, msg); 606 } 607 608 /* In soft IRQ context */ 609 static void ssip_pn_rx(struct sk_buff *skb) 610 { 611 struct net_device *dev = skb->dev; 612 613 if (unlikely(!netif_running(dev))) { 614 dev_dbg(&dev->dev, "Drop RX packet\n"); 615 dev->stats.rx_dropped++; 616 dev_kfree_skb(skb); 617 return; 618 } 619 if (unlikely(!pskb_may_pull(skb, SSIP_MIN_PN_HDR))) { 620 dev_dbg(&dev->dev, "Error drop RX packet\n"); 621 dev->stats.rx_errors++; 622 dev->stats.rx_length_errors++; 623 dev_kfree_skb(skb); 624 return; 625 } 626 dev->stats.rx_packets++; 627 dev->stats.rx_bytes += skb->len; 628 629 /* length field is exchanged in network byte order */ 630 ((u16 *)skb->data)[2] = ntohs(((u16 *)skb->data)[2]); 631 dev_dbg(&dev->dev, "RX length fixed (%04x -> %u)\n", 632 ((u16 *)skb->data)[2], ntohs(((u16 *)skb->data)[2])); 633 634 skb->protocol = htons(ETH_P_PHONET); 635 skb_reset_mac_header(skb); 636 __skb_pull(skb, 1); 637 netif_rx(skb); 638 } 639 640 static void ssip_rx_data_complete(struct hsi_msg *msg) 641 { 642 struct hsi_client *cl = msg->cl; 643 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 644 struct sk_buff *skb; 645 646 if (msg->status == HSI_STATUS_ERROR) { 647 dev_err(&cl->device, "RX data error\n"); 648 ssip_free_data(msg); 649 ssip_error(cl); 650 return; 651 } 652 del_timer(&ssi->rx_wd); /* FIXME: Revisit */ 653 skb = msg->context; 654 ssip_pn_rx(skb); 655 hsi_free_msg(msg); 656 } 657 658 static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) 659 { 660 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 661 struct hsi_msg *msg; 662 663 /* Workaroud: Ignore CMT Loader message leftover */ 664 if (cmd == SSIP_CMT_LOADER_SYNC) 665 return; 666 667 switch (ssi->main_state) { 668 case ACTIVE: 669 dev_err(&cl->device, "Boot info req on active state\n"); 670 ssip_error(cl); 671 fallthrough; 672 case INIT: 673 case HANDSHAKE: 674 spin_lock_bh(&ssi->lock); 675 ssi->main_state = HANDSHAKE; 676 spin_unlock_bh(&ssi->lock); 677 678 if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) 679 ssi_waketest(cl, 1); /* FIXME: To be removed */ 680 681 spin_lock_bh(&ssi->lock); 682 /* Start boot handshake watchdog */ 683 mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); 684 spin_unlock_bh(&ssi->lock); 685 dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); 686 if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) 687 dev_warn(&cl->device, "boot info req verid mismatch\n"); 688 msg = ssip_claim_cmd(ssi); 689 ssip_set_cmd(msg, SSIP_BOOTINFO_RESP_CMD(SSIP_LOCAL_VERID)); 690 msg->complete = ssip_release_cmd; 691 hsi_async_write(cl, msg); 692 break; 693 default: 694 dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state); 695 break; 696 } 697 } 698 699 static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) 700 { 701 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 702 703 if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) 704 dev_warn(&cl->device, "boot info resp verid mismatch\n"); 705 706 spin_lock_bh(&ssi->lock); 707 if (ssi->main_state != ACTIVE) 708 /* Use tx_wd as a boot watchdog in non ACTIVE state */ 709 mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); 710 else 711 dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", 712 ssi->main_state); 713 spin_unlock_bh(&ssi->lock); 714 } 715 716 static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) 717 { 718 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 719 unsigned int wkres = SSIP_PAYLOAD(cmd); 720 721 spin_lock_bh(&ssi->lock); 722 if (ssi->main_state != HANDSHAKE) { 723 dev_dbg(&cl->device, "wake lines test ignored M(%d)\n", 724 ssi->main_state); 725 spin_unlock_bh(&ssi->lock); 726 return; 727 } 728 spin_unlock_bh(&ssi->lock); 729 730 if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) 731 ssi_waketest(cl, 0); /* FIXME: To be removed */ 732 733 spin_lock_bh(&ssi->lock); 734 ssi->main_state = ACTIVE; 735 del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ 736 spin_unlock_bh(&ssi->lock); 737 738 dev_notice(&cl->device, "WAKELINES TEST %s\n", 739 wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK"); 740 if (wkres & SSIP_WAKETEST_FAILED) { 741 ssip_error(cl); 742 return; 743 } 744 dev_dbg(&cl->device, "CMT is ONLINE\n"); 745 netif_wake_queue(ssi->netdev); 746 netif_carrier_on(ssi->netdev); 747 } 748 749 static void ssip_rx_ready(struct hsi_client *cl) 750 { 751 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 752 753 spin_lock_bh(&ssi->lock); 754 if (unlikely(ssi->main_state != ACTIVE)) { 755 dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", 756 ssi->send_state, ssi->main_state); 757 spin_unlock_bh(&ssi->lock); 758 return; 759 } 760 if (ssi->send_state != WAIT4READY) { 761 dev_dbg(&cl->device, "Ignore spurious READY command\n"); 762 spin_unlock_bh(&ssi->lock); 763 return; 764 } 765 ssip_set_txstate(ssi, SEND_READY); 766 spin_unlock_bh(&ssi->lock); 767 ssip_xmit(cl); 768 } 769 770 static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) 771 { 772 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 773 struct sk_buff *skb; 774 struct hsi_msg *msg; 775 int len = SSIP_PDU_LENGTH(cmd); 776 777 dev_dbg(&cl->device, "RX strans: %d frames\n", len); 778 spin_lock_bh(&ssi->lock); 779 if (unlikely(ssi->main_state != ACTIVE)) { 780 dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", 781 ssi->send_state, ssi->main_state); 782 spin_unlock_bh(&ssi->lock); 783 return; 784 } 785 ssip_set_rxstate(ssi, RECEIVING); 786 if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { 787 dev_err(&cl->device, "START TRANS id %d expected %d\n", 788 SSIP_MSG_ID(cmd), ssi->rxid); 789 spin_unlock_bh(&ssi->lock); 790 goto out1; 791 } 792 ssi->rxid++; 793 spin_unlock_bh(&ssi->lock); 794 skb = netdev_alloc_skb(ssi->netdev, len * 4); 795 if (unlikely(!skb)) { 796 dev_err(&cl->device, "No memory for rx skb\n"); 797 goto out1; 798 } 799 skb_put(skb, len * 4); 800 msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); 801 if (unlikely(!msg)) { 802 dev_err(&cl->device, "No memory for RX data msg\n"); 803 goto out2; 804 } 805 msg->complete = ssip_rx_data_complete; 806 hsi_async_read(cl, msg); 807 808 return; 809 out2: 810 dev_kfree_skb(skb); 811 out1: 812 ssip_error(cl); 813 } 814 815 static void ssip_rxcmd_complete(struct hsi_msg *msg) 816 { 817 struct hsi_client *cl = msg->cl; 818 u32 cmd = ssip_get_cmd(msg); 819 unsigned int cmdid = SSIP_COMMAND(cmd); 820 821 if (msg->status == HSI_STATUS_ERROR) { 822 dev_err(&cl->device, "RX error detected\n"); 823 ssip_release_cmd(msg); 824 ssip_error(cl); 825 return; 826 } 827 hsi_async_read(cl, msg); 828 dev_dbg(&cl->device, "RX cmd: 0x%08x\n", cmd); 829 switch (cmdid) { 830 case SSIP_SW_BREAK: 831 /* Ignored */ 832 break; 833 case SSIP_BOOTINFO_REQ: 834 ssip_rx_bootinforeq(cl, cmd); 835 break; 836 case SSIP_BOOTINFO_RESP: 837 ssip_rx_bootinforesp(cl, cmd); 838 break; 839 case SSIP_WAKETEST_RESULT: 840 ssip_rx_waketest(cl, cmd); 841 break; 842 case SSIP_START_TRANS: 843 ssip_rx_strans(cl, cmd); 844 break; 845 case SSIP_READY: 846 ssip_rx_ready(cl); 847 break; 848 default: 849 dev_warn(&cl->device, "command 0x%08x not supported\n", cmd); 850 break; 851 } 852 } 853 854 static void ssip_swbreak_complete(struct hsi_msg *msg) 855 { 856 struct hsi_client *cl = msg->cl; 857 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 858 859 ssip_release_cmd(msg); 860 spin_lock_bh(&ssi->lock); 861 if (list_empty(&ssi->txqueue)) { 862 if (atomic_read(&ssi->tx_usecnt)) { 863 ssip_set_txstate(ssi, SEND_READY); 864 } else { 865 ssip_set_txstate(ssi, SEND_IDLE); 866 hsi_stop_tx(cl); 867 } 868 spin_unlock_bh(&ssi->lock); 869 } else { 870 spin_unlock_bh(&ssi->lock); 871 ssip_xmit(cl); 872 } 873 netif_wake_queue(ssi->netdev); 874 } 875 876 static void ssip_tx_data_complete(struct hsi_msg *msg) 877 { 878 struct hsi_client *cl = msg->cl; 879 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 880 struct hsi_msg *cmsg; 881 882 if (msg->status == HSI_STATUS_ERROR) { 883 dev_err(&cl->device, "TX data error\n"); 884 ssip_error(cl); 885 goto out; 886 } 887 spin_lock_bh(&ssi->lock); 888 if (list_empty(&ssi->txqueue)) { 889 ssip_set_txstate(ssi, SENDING_SWBREAK); 890 spin_unlock_bh(&ssi->lock); 891 cmsg = ssip_claim_cmd(ssi); 892 ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); 893 cmsg->complete = ssip_swbreak_complete; 894 dev_dbg(&cl->device, "Send SWBREAK\n"); 895 hsi_async_write(cl, cmsg); 896 } else { 897 spin_unlock_bh(&ssi->lock); 898 ssip_xmit(cl); 899 } 900 out: 901 ssip_free_data(msg); 902 } 903 904 static void ssip_port_event(struct hsi_client *cl, unsigned long event) 905 { 906 switch (event) { 907 case HSI_EVENT_START_RX: 908 ssip_start_rx(cl); 909 break; 910 case HSI_EVENT_STOP_RX: 911 ssip_stop_rx(cl); 912 break; 913 default: 914 return; 915 } 916 } 917 918 static int ssip_pn_open(struct net_device *dev) 919 { 920 struct hsi_client *cl = to_hsi_client(dev->dev.parent); 921 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 922 int err; 923 924 err = hsi_claim_port(cl, 1); 925 if (err < 0) { 926 dev_err(&cl->device, "SSI port already claimed\n"); 927 return err; 928 } 929 err = hsi_register_port_event(cl, ssip_port_event); 930 if (err < 0) { 931 dev_err(&cl->device, "Register HSI port event failed (%d)\n", 932 err); 933 hsi_release_port(cl); 934 return err; 935 } 936 dev_dbg(&cl->device, "Configuring SSI port\n"); 937 hsi_setup(cl); 938 939 if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) 940 ssi_waketest(cl, 1); /* FIXME: To be removed */ 941 942 spin_lock_bh(&ssi->lock); 943 ssi->main_state = HANDSHAKE; 944 spin_unlock_bh(&ssi->lock); 945 946 ssip_send_bootinfo_req_cmd(cl); 947 948 return 0; 949 } 950 951 static int ssip_pn_stop(struct net_device *dev) 952 { 953 struct hsi_client *cl = to_hsi_client(dev->dev.parent); 954 955 ssip_reset(cl); 956 hsi_unregister_port_event(cl); 957 hsi_release_port(cl); 958 959 return 0; 960 } 961 962 static void ssip_xmit_work(struct work_struct *work) 963 { 964 struct ssi_protocol *ssi = 965 container_of(work, struct ssi_protocol, work); 966 struct hsi_client *cl = ssi->cl; 967 968 ssip_xmit(cl); 969 } 970 971 static netdev_tx_t ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) 972 { 973 struct hsi_client *cl = to_hsi_client(dev->dev.parent); 974 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 975 struct hsi_msg *msg; 976 977 if ((skb->protocol != htons(ETH_P_PHONET)) || 978 (skb->len < SSIP_MIN_PN_HDR)) 979 goto drop; 980 /* Pad to 32-bits - FIXME: Revisit*/ 981 if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) 982 goto inc_dropped; 983 984 /* 985 * Modem sends Phonet messages over SSI with its own endianness. 986 * Assume that modem has the same endianness as we do. 987 */ 988 if (skb_cow_head(skb, 0)) 989 goto drop; 990 991 /* length field is exchanged in network byte order */ 992 ((u16 *)skb->data)[2] = htons(((u16 *)skb->data)[2]); 993 994 msg = ssip_alloc_data(ssi, skb, GFP_ATOMIC); 995 if (!msg) { 996 dev_dbg(&cl->device, "Dropping tx data: No memory\n"); 997 goto drop; 998 } 999 msg->complete = ssip_tx_data_complete; 1000 1001 spin_lock_bh(&ssi->lock); 1002 if (unlikely(ssi->main_state != ACTIVE)) { 1003 spin_unlock_bh(&ssi->lock); 1004 dev_dbg(&cl->device, "Dropping tx data: CMT is OFFLINE\n"); 1005 goto drop2; 1006 } 1007 list_add_tail(&msg->link, &ssi->txqueue); 1008 ssi->txqueue_len++; 1009 if (dev->tx_queue_len < ssi->txqueue_len) { 1010 dev_info(&cl->device, "TX queue full %d\n", ssi->txqueue_len); 1011 netif_stop_queue(dev); 1012 } 1013 if (ssi->send_state == SEND_IDLE) { 1014 ssip_set_txstate(ssi, WAIT4READY); 1015 spin_unlock_bh(&ssi->lock); 1016 dev_dbg(&cl->device, "Start TX qlen %d\n", ssi->txqueue_len); 1017 hsi_start_tx(cl); 1018 } else if (ssi->send_state == SEND_READY) { 1019 /* Needed for cmt-speech workaround */ 1020 dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", 1021 ssi->txqueue_len); 1022 spin_unlock_bh(&ssi->lock); 1023 schedule_work(&ssi->work); 1024 } else { 1025 spin_unlock_bh(&ssi->lock); 1026 } 1027 dev->stats.tx_packets++; 1028 dev->stats.tx_bytes += skb->len; 1029 1030 return NETDEV_TX_OK; 1031 drop2: 1032 hsi_free_msg(msg); 1033 drop: 1034 dev_kfree_skb(skb); 1035 inc_dropped: 1036 dev->stats.tx_dropped++; 1037 1038 return NETDEV_TX_OK; 1039 } 1040 1041 /* CMT reset event handler */ 1042 void ssip_reset_event(struct hsi_client *master) 1043 { 1044 struct ssi_protocol *ssi = hsi_client_drvdata(master); 1045 dev_err(&ssi->cl->device, "CMT reset detected!\n"); 1046 ssip_error(ssi->cl); 1047 } 1048 EXPORT_SYMBOL_GPL(ssip_reset_event); 1049 1050 static const struct net_device_ops ssip_pn_ops = { 1051 .ndo_open = ssip_pn_open, 1052 .ndo_stop = ssip_pn_stop, 1053 .ndo_start_xmit = ssip_pn_xmit, 1054 }; 1055 1056 static void ssip_pn_setup(struct net_device *dev) 1057 { 1058 static const u8 addr = PN_MEDIA_SOS; 1059 1060 dev->features = 0; 1061 dev->netdev_ops = &ssip_pn_ops; 1062 dev->type = ARPHRD_PHONET; 1063 dev->flags = IFF_POINTOPOINT | IFF_NOARP; 1064 dev->mtu = SSIP_DEFAULT_MTU; 1065 dev->hard_header_len = 1; 1066 dev->addr_len = 1; 1067 dev_addr_set(dev, &addr); 1068 dev->tx_queue_len = SSIP_TXQUEUE_LEN; 1069 1070 dev->needs_free_netdev = true; 1071 dev->header_ops = &phonet_header_ops; 1072 } 1073 1074 static int ssi_protocol_probe(struct device *dev) 1075 { 1076 static const char ifname[] = "phonet%d"; 1077 struct hsi_client *cl = to_hsi_client(dev); 1078 struct ssi_protocol *ssi; 1079 int err; 1080 1081 ssi = kzalloc(sizeof(*ssi), GFP_KERNEL); 1082 if (!ssi) 1083 return -ENOMEM; 1084 1085 spin_lock_init(&ssi->lock); 1086 timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE); 1087 timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE); 1088 timer_setup(&ssi->keep_alive, ssip_keep_alive, 0); 1089 INIT_LIST_HEAD(&ssi->txqueue); 1090 INIT_LIST_HEAD(&ssi->cmdqueue); 1091 atomic_set(&ssi->tx_usecnt, 0); 1092 hsi_client_set_drvdata(cl, ssi); 1093 ssi->cl = cl; 1094 INIT_WORK(&ssi->work, ssip_xmit_work); 1095 1096 ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); 1097 if (ssi->channel_id_cmd < 0) { 1098 err = ssi->channel_id_cmd; 1099 dev_err(dev, "Could not get cmd channel (%d)\n", err); 1100 goto out; 1101 } 1102 1103 ssi->channel_id_data = hsi_get_channel_id_by_name(cl, "mcsaab-data"); 1104 if (ssi->channel_id_data < 0) { 1105 err = ssi->channel_id_data; 1106 dev_err(dev, "Could not get data channel (%d)\n", err); 1107 goto out; 1108 } 1109 1110 err = ssip_alloc_cmds(ssi); 1111 if (err < 0) { 1112 dev_err(dev, "No memory for commands\n"); 1113 goto out; 1114 } 1115 1116 ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup); 1117 if (!ssi->netdev) { 1118 dev_err(dev, "No memory for netdev\n"); 1119 err = -ENOMEM; 1120 goto out1; 1121 } 1122 1123 /* MTU range: 6 - 65535 */ 1124 ssi->netdev->min_mtu = PHONET_MIN_MTU; 1125 ssi->netdev->max_mtu = SSIP_MAX_MTU; 1126 1127 SET_NETDEV_DEV(ssi->netdev, dev); 1128 netif_carrier_off(ssi->netdev); 1129 err = register_netdev(ssi->netdev); 1130 if (err < 0) { 1131 dev_err(dev, "Register netdev failed (%d)\n", err); 1132 goto out2; 1133 } 1134 1135 list_add(&ssi->link, &ssip_list); 1136 1137 dev_dbg(dev, "channel configuration: cmd=%d, data=%d\n", 1138 ssi->channel_id_cmd, ssi->channel_id_data); 1139 1140 return 0; 1141 out2: 1142 free_netdev(ssi->netdev); 1143 out1: 1144 ssip_free_cmds(ssi); 1145 out: 1146 kfree(ssi); 1147 1148 return err; 1149 } 1150 1151 static int ssi_protocol_remove(struct device *dev) 1152 { 1153 struct hsi_client *cl = to_hsi_client(dev); 1154 struct ssi_protocol *ssi = hsi_client_drvdata(cl); 1155 1156 list_del(&ssi->link); 1157 unregister_netdev(ssi->netdev); 1158 ssip_free_cmds(ssi); 1159 hsi_client_set_drvdata(cl, NULL); 1160 kfree(ssi); 1161 1162 return 0; 1163 } 1164 1165 static struct hsi_client_driver ssip_driver = { 1166 .driver = { 1167 .name = "ssi-protocol", 1168 .owner = THIS_MODULE, 1169 .probe = ssi_protocol_probe, 1170 .remove = ssi_protocol_remove, 1171 }, 1172 }; 1173 1174 static int __init ssip_init(void) 1175 { 1176 pr_info("SSI protocol aka McSAAB added\n"); 1177 1178 return hsi_register_client_driver(&ssip_driver); 1179 } 1180 module_init(ssip_init); 1181 1182 static void __exit ssip_exit(void) 1183 { 1184 hsi_unregister_client_driver(&ssip_driver); 1185 pr_info("SSI protocol driver removed\n"); 1186 } 1187 module_exit(ssip_exit); 1188 1189 MODULE_ALIAS("hsi:ssi-protocol"); 1190 MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>"); 1191 MODULE_AUTHOR("Remi Denis-Courmont <remi.denis-courmont@nokia.com>"); 1192 MODULE_DESCRIPTION("SSI protocol improved aka McSAAB"); 1193 MODULE_LICENSE("GPL"); 1194