// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chandrashekar Devegowda <chandrashekar.devegowda@intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/wwan.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_modem_ops.h"
#include "t7xx_port.h"
#include "t7xx_port_proxy.h"
#include "t7xx_state_monitor.h"

#define Q_IDX_CTRL		0
#define Q_IDX_MBIM		2
#define Q_IDX_AT_CMD		5

#define INVALID_SEQ_NUM		GENMASK(15, 0)

#define for_each_proxy_port(i, p, proxy)	\
	for (i = 0, (p) = &(proxy)->ports[i];	\
	     i < (proxy)->port_count;		\
	     i++, (p) = &(proxy)->ports[i])

static const struct t7xx_port_conf t7xx_md_port_conf[] = {
	{
		.tx_ch = PORT_CH_UART2_TX,
		.rx_ch = PORT_CH_UART2_RX,
		.txq_index = Q_IDX_AT_CMD,
		.rxq_index = Q_IDX_AT_CMD,
		.txq_exp_index = 0xff,
		.rxq_exp_index = 0xff,
		.path_id = CLDMA_ID_MD,
		.ops = &wwan_sub_port_ops,
		.name = "AT",
		.port_type = WWAN_PORT_AT,
	}, {
		.tx_ch = PORT_CH_MBIM_TX,
		.rx_ch = PORT_CH_MBIM_RX,
		.txq_index = Q_IDX_MBIM,
		.rxq_index = Q_IDX_MBIM,
		.path_id = CLDMA_ID_MD,
		.ops = &wwan_sub_port_ops,
		.name = "MBIM",
		.port_type = WWAN_PORT_MBIM,
	}, {
		.tx_ch = PORT_CH_CONTROL_TX,
		.rx_ch = PORT_CH_CONTROL_RX,
		.txq_index = Q_IDX_CTRL,
		.rxq_index = Q_IDX_CTRL,
		.path_id = CLDMA_ID_MD,
		.ops = &ctl_port_ops,
		.name = "t7xx_ctrl",
	}, {
		.tx_ch = PORT_CH_AP_CONTROL_TX,
		.rx_ch = PORT_CH_AP_CONTROL_RX,
		.txq_index = Q_IDX_CTRL,
		.rxq_index = Q_IDX_CTRL,
		.path_id = CLDMA_ID_AP,
		.ops = &ctl_port_ops,
		.name = "t7xx_ap_ctrl",
	},
};

static struct t7xx_port_conf t7xx_early_port_conf[] = {
	{
		.tx_ch = 0xffff,
		.rx_ch = 0xffff,
		.txq_index = 1,
		.rxq_index = 1,
		.txq_exp_index = 1,
		.rxq_exp_index = 1,
		.path_id = CLDMA_ID_AP,
		.is_early_port = true,
		.name = "ttyDUMP",
	},
};

static struct t7xx_port *t7xx_proxy_get_port_by_ch(struct port_proxy *port_prox, enum port_ch ch)
{
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		port_conf = port->port_conf;
		if (port_conf->rx_ch == ch || port_conf->tx_ch == ch)
			return port;
	}

	return NULL;
}

static u16 t7xx_port_next_rx_seq_num(struct t7xx_port *port, struct ccci_header *ccci_h)
{
	u32 status = le32_to_cpu(ccci_h->status);
	u16 seq_num, next_seq_num;
	bool assert_bit;

	seq_num = FIELD_GET(CCCI_H_SEQ_FLD, status);
	next_seq_num = (seq_num + 1) & FIELD_MAX(CCCI_H_SEQ_FLD);
	assert_bit = status & CCCI_H_AST_BIT;
	if (!assert_bit || port->seq_nums[MTK_RX] == INVALID_SEQ_NUM)
		return next_seq_num;

	if (seq_num != port->seq_nums[MTK_RX])
		dev_warn_ratelimited(port->dev,
				     "seq num out-of-order %u != %u (header %X, len %X)\n",
				     seq_num, port->seq_nums[MTK_RX],
				     le32_to_cpu(ccci_h->packet_header),
				     le32_to_cpu(ccci_h->packet_len));

	return next_seq_num;
}
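
/*
 * Illustrative note: the RX sequence number wraps within CCCI_H_SEQ_FLD.
 * Assuming that field is 15 bits wide, a header carrying the maximum value
 * wraps back to zero:
 *
 *	next_seq_num = (FIELD_MAX(CCCI_H_SEQ_FLD) + 1) & FIELD_MAX(CCCI_H_SEQ_FLD); // == 0
 *
 * The out-of-order warning above is skipped when the header's assert bit is
 * clear or when the port has not yet recorded a valid RX sequence number
 * (INVALID_SEQ_NUM).
 */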

void t7xx_port_proxy_reset(struct port_proxy *port_prox)
{
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
		port->seq_nums[MTK_TX] = 0;
	}
}

static int t7xx_port_get_queue_no(struct t7xx_port *port)
{
	const struct t7xx_port_conf *port_conf = port->port_conf;
	struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;

	return t7xx_fsm_get_md_state(ctl) == MD_STATE_EXCEPTION ?
		port_conf->txq_exp_index : port_conf->txq_index;
}

static void t7xx_port_struct_init(struct t7xx_port *port)
{
	INIT_LIST_HEAD(&port->entry);
	INIT_LIST_HEAD(&port->queue_entry);
	skb_queue_head_init(&port->rx_skb_list);
	init_waitqueue_head(&port->rx_wq);
	port->seq_nums[MTK_RX] = INVALID_SEQ_NUM;
	port->seq_nums[MTK_TX] = 0;
	atomic_set(&port->usage_cnt, 0);
}

struct sk_buff *t7xx_port_alloc_skb(int payload)
{
	struct sk_buff *skb = __dev_alloc_skb(payload + sizeof(struct ccci_header), GFP_KERNEL);

	if (skb)
		skb_reserve(skb, sizeof(struct ccci_header));

	return skb;
}

struct sk_buff *t7xx_ctrl_alloc_skb(int payload)
{
	struct sk_buff *skb = t7xx_port_alloc_skb(payload + sizeof(struct ctrl_msg_header));

	if (skb)
		skb_reserve(skb, sizeof(struct ctrl_msg_header));

	return skb;
}

/**
 * t7xx_port_enqueue_skb() - Enqueue the received skb into the port's rx_skb_list.
 * @port: port context.
 * @skb: received skb.
 *
 * Return:
 * * 0		- Success.
 * * -ENOBUFS	- Not enough buffer space. Caller will try again later, skb is not consumed.
 */
int t7xx_port_enqueue_skb(struct t7xx_port *port, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&port->rx_wq.lock, flags);
	if (port->rx_skb_list.qlen >= port->rx_length_th) {
		spin_unlock_irqrestore(&port->rx_wq.lock, flags);

		return -ENOBUFS;
	}
	__skb_queue_tail(&port->rx_skb_list, skb);
	spin_unlock_irqrestore(&port->rx_wq.lock, flags);

	wake_up_all(&port->rx_wq);
	return 0;
}
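
/*
 * A port's receive path is expected to drain rx_skb_list under rx_wq.lock
 * (the same lock taken by t7xx_port_enqueue_skb() above) and to sleep on
 * rx_wq while the list is empty. A minimal consumer sketch, assuming the
 * caller may sleep:
 *
 *	spin_lock_irqsave(&port->rx_wq.lock, flags);
 *	skb = __skb_dequeue(&port->rx_skb_list);
 *	spin_unlock_irqrestore(&port->rx_wq.lock, flags);
 *	if (!skb)
 *		wait_event_interruptible(port->rx_wq,
 *					 !skb_queue_empty(&port->rx_skb_list));
 */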

int t7xx_get_port_mtu(struct t7xx_port *port)
{
	enum cldma_id path_id = port->port_conf->path_id;
	int tx_qno = t7xx_port_get_queue_no(port);
	struct cldma_ctrl *md_ctrl;

	md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
	return md_ctrl->tx_ring[tx_qno].pkt_size;
}

int t7xx_port_send_raw_skb(struct t7xx_port *port, struct sk_buff *skb)
{
	enum cldma_id path_id = port->port_conf->path_id;
	struct cldma_ctrl *md_ctrl;
	int ret, tx_qno;

	md_ctrl = port->t7xx_dev->md->md_ctrl[path_id];
	tx_qno = t7xx_port_get_queue_no(port);
	ret = t7xx_cldma_send_skb(md_ctrl, tx_qno, skb);
	if (ret)
		dev_err(port->dev, "Failed to send skb: %d\n", ret);

	return ret;
}

static int t7xx_port_send_ccci_skb(struct t7xx_port *port, struct sk_buff *skb,
				   unsigned int pkt_header, unsigned int ex_msg)
{
	const struct t7xx_port_conf *port_conf = port->port_conf;
	struct ccci_header *ccci_h;
	u32 status;
	int ret;

	ccci_h = skb_push(skb, sizeof(*ccci_h));
	status = FIELD_PREP(CCCI_H_CHN_FLD, port_conf->tx_ch) |
		 FIELD_PREP(CCCI_H_SEQ_FLD, port->seq_nums[MTK_TX]) | CCCI_H_AST_BIT;
	ccci_h->status = cpu_to_le32(status);
	ccci_h->packet_header = cpu_to_le32(pkt_header);
	ccci_h->packet_len = cpu_to_le32(skb->len);
	ccci_h->ex_msg = cpu_to_le32(ex_msg);

	ret = t7xx_port_send_raw_skb(port, skb);
	if (ret)
		return ret;

	port->seq_nums[MTK_TX]++;
	return 0;
}

int t7xx_port_send_ctl_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int msg,
			   unsigned int ex_msg)
{
	struct ctrl_msg_header *ctrl_msg_h;
	unsigned int msg_len = skb->len;
	u32 pkt_header = 0;

	ctrl_msg_h = skb_push(skb, sizeof(*ctrl_msg_h));
	ctrl_msg_h->ctrl_msg_id = cpu_to_le32(msg);
	ctrl_msg_h->ex_msg = cpu_to_le32(ex_msg);
	ctrl_msg_h->data_length = cpu_to_le32(msg_len);

	if (!msg_len)
		pkt_header = CCCI_HEADER_NO_DATA;

	return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}
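
/*
 * On the wire, a control message sent through t7xx_port_send_ctl_skb() ends
 * up as [struct ccci_header][struct ctrl_msg_header][payload]: headroom for
 * both headers is reserved by t7xx_ctrl_alloc_skb() and filled in by the two
 * skb_push() calls above. A minimal caller sketch (illustrative only;
 * CTL_ID_EXAMPLE is a placeholder, not a real message ID):
 *
 *	skb = t7xx_ctrl_alloc_skb(sizeof(payload));
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_put_data(skb, &payload, sizeof(payload));
 *	ret = t7xx_port_send_ctl_skb(port, skb, CTL_ID_EXAMPLE, 0);
 */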

int t7xx_port_send_skb(struct t7xx_port *port, struct sk_buff *skb, unsigned int pkt_header,
		       unsigned int ex_msg)
{
	struct t7xx_fsm_ctl *ctl = port->t7xx_dev->md->fsm_ctl;
	unsigned int fsm_state;

	fsm_state = t7xx_fsm_get_ctl_state(ctl);
	if (fsm_state != FSM_STATE_PRE_START) {
		const struct t7xx_port_conf *port_conf = port->port_conf;
		enum md_state md_state = t7xx_fsm_get_md_state(ctl);

		switch (md_state) {
		case MD_STATE_EXCEPTION:
			if (port_conf->tx_ch != PORT_CH_MD_LOG_TX)
				return -EBUSY;
			break;

		case MD_STATE_WAITING_FOR_HS1:
		case MD_STATE_WAITING_FOR_HS2:
		case MD_STATE_STOPPED:
		case MD_STATE_WAITING_TO_STOP:
		case MD_STATE_INVALID:
			return -ENODEV;

		default:
			break;
		}
	}

	return t7xx_port_send_ccci_skb(port, skb, pkt_header, ex_msg);
}

static void t7xx_proxy_setup_ch_mapping(struct port_proxy *port_prox)
{
	struct t7xx_port *port;
	int i, j;

	for (i = 0; i < ARRAY_SIZE(port_prox->rx_ch_ports); i++)
		INIT_LIST_HEAD(&port_prox->rx_ch_ports[i]);

	for (j = 0; j < ARRAY_SIZE(port_prox->queue_ports); j++) {
		for (i = 0; i < ARRAY_SIZE(port_prox->queue_ports[j]); i++)
			INIT_LIST_HEAD(&port_prox->queue_ports[j][i]);
	}

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;
		enum cldma_id path_id = port_conf->path_id;
		u8 ch_id;

		ch_id = FIELD_GET(PORT_CH_ID_MASK, port_conf->rx_ch);
		list_add_tail(&port->entry, &port_prox->rx_ch_ports[ch_id]);
		list_add_tail(&port->queue_entry,
			      &port_prox->queue_ports[path_id][port_conf->rxq_index]);
	}
}

static int t7xx_port_proxy_recv_skb_from_queue(struct t7xx_pci_dev *t7xx_dev,
					       struct cldma_queue *queue, struct sk_buff *skb)
{
	struct port_proxy *port_prox = t7xx_dev->md->port_prox;
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int ret;

	port = port_prox->ports;
	port_conf = port->port_conf;

	ret = port_conf->ops->recv_skb(port, skb);
	if (ret < 0 && ret != -ENOBUFS) {
		dev_err(port->dev, "drop on RX ch %d, %d\n", port_conf->rx_ch, ret);
		dev_kfree_skb_any(skb);
	}

	return ret;
}

static struct t7xx_port *t7xx_port_proxy_find_port(struct t7xx_pci_dev *t7xx_dev,
						   struct cldma_queue *queue, u16 channel)
{
	struct port_proxy *port_prox = t7xx_dev->md->port_prox;
	struct list_head *port_list;
	struct t7xx_port *port;
	u8 ch_id;

	ch_id = FIELD_GET(PORT_CH_ID_MASK, channel);
	port_list = &port_prox->rx_ch_ports[ch_id];
	list_for_each_entry(port, port_list, entry) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		if (queue->md_ctrl->hif_id == port_conf->path_id &&
		    channel == port_conf->rx_ch)
			return port;
	}

	return NULL;
}

struct t7xx_port *t7xx_port_proxy_get_port_by_name(struct port_proxy *port_prox, char *port_name)
{
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		port_conf = port->port_conf;

		if (!strncmp(port_conf->name, port_name, strlen(port_conf->name)))
			return port;
	}

	return NULL;
}

/**
 * t7xx_port_proxy_recv_skb() - Dispatch received skb.
 * @queue: CLDMA queue.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0		- Packet consumed.
 * * -ERROR	- Failed to process skb.
 */
static int t7xx_port_proxy_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	struct ccci_header *ccci_h = (struct ccci_header *)skb->data;
	struct t7xx_pci_dev *t7xx_dev = queue->md_ctrl->t7xx_dev;
	struct t7xx_fsm_ctl *ctl = t7xx_dev->md->fsm_ctl;
	struct device *dev = queue->md_ctrl->dev;
	const struct t7xx_port_conf *port_conf;
	struct t7xx_port *port;
	u16 seq_num, channel;
	int ret;

	if (queue->q_type == CLDMA_DEDICATED_Q)
		return t7xx_port_proxy_recv_skb_from_queue(t7xx_dev, queue, skb);

	channel = FIELD_GET(CCCI_H_CHN_FLD, le32_to_cpu(ccci_h->status));
	if (t7xx_fsm_get_md_state(ctl) == MD_STATE_INVALID) {
		dev_err_ratelimited(dev, "Packet drop on channel 0x%x, modem not ready\n", channel);
		goto drop_skb;
	}

	port = t7xx_port_proxy_find_port(t7xx_dev, queue, channel);
	if (!port) {
		dev_err_ratelimited(dev, "Packet drop on channel 0x%x, port not found\n", channel);
		goto drop_skb;
	}

	seq_num = t7xx_port_next_rx_seq_num(port, ccci_h);
	port_conf = port->port_conf;
	if (!port_conf->is_early_port)
		skb_pull(skb, sizeof(*ccci_h));

	ret = port_conf->ops->recv_skb(port, skb);
	/* Error indicates to try again later */
	if (ret) {
		/* Restore the CCCI header only if it was stripped above */
		if (!port_conf->is_early_port)
			skb_push(skb, sizeof(*ccci_h));
		return ret;
	}

	port->seq_nums[MTK_RX] = seq_num;
	return 0;

drop_skb:
	dev_kfree_skb_any(skb);
	return 0;
}

/**
 * t7xx_port_proxy_md_status_notify() - Notify all ports of state.
 * @port_prox: The port_proxy pointer.
 * @state: The new modem (MD) state.
 *
 * Called by t7xx_fsm. Used to dispatch the modem state to all ports that
 * need to track MD state transitions.
 */
void t7xx_port_proxy_md_status_notify(struct port_proxy *port_prox, unsigned int state)
{
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		if (port_conf->ops->md_state_notify)
			port_conf->ops->md_state_notify(port, state);
	}
}

static void t7xx_proxy_init_all_ports(struct t7xx_modem *md)
{
	struct port_proxy *port_prox = md->port_prox;
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		t7xx_port_struct_init(port);

		if (port_conf->tx_ch == PORT_CH_CONTROL_TX)
			md->core_md.ctl_port = port;

		if (port_conf->tx_ch == PORT_CH_AP_CONTROL_TX)
			md->core_ap.ctl_port = port;

		port->t7xx_dev = md->t7xx_dev;
		port->dev = &md->t7xx_dev->pdev->dev;
		spin_lock_init(&port->port_update_lock);
		port->chan_enable = false;

		if (port_conf->ops->init)
			port_conf->ops->init(port);
	}

	t7xx_proxy_setup_ch_mapping(port_prox);
}

void t7xx_port_proxy_set_cfg(struct t7xx_modem *md, enum port_cfg_id cfg_id)
{
	struct port_proxy *port_prox = md->port_prox;
	const struct t7xx_port_conf *port_conf;
	struct device *dev = port_prox->dev;
	unsigned int port_count;
	struct t7xx_port *port;
	int i;

	if (port_prox->cfg_id == cfg_id)
		return;

	if (port_prox->cfg_id != PORT_CFG_ID_INVALID) {
		for_each_proxy_port(i, port, port_prox)
			port->port_conf->ops->uninit(port);

		devm_kfree(dev, port_prox->ports);
	}

	if (cfg_id == PORT_CFG_ID_EARLY) {
		port_conf = t7xx_early_port_conf;
		port_count = ARRAY_SIZE(t7xx_early_port_conf);
	} else {
		port_conf = t7xx_md_port_conf;
		port_count = ARRAY_SIZE(t7xx_md_port_conf);
	}

	port_prox->ports = devm_kcalloc(dev, port_count, sizeof(struct t7xx_port), GFP_KERNEL);
	if (!port_prox->ports)
		return;

	for (i = 0; i < port_count; i++)
		port_prox->ports[i].port_conf = &port_conf[i];

	port_prox->cfg_id = cfg_id;
	port_prox->port_count = port_count;
	t7xx_proxy_init_all_ports(md);
}
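
/*
 * Port configurations are switched at runtime: t7xx_port_proxy_init() starts
 * with PORT_CFG_ID_EARLY (the single dump port above), and the modem code is
 * expected to call t7xx_port_proxy_set_cfg() again once the device boots into
 * normal mode, e.g. (illustrative; the exact enum value for the full MD port
 * set is defined in t7xx_port_proxy.h and assumed here to be
 * PORT_CFG_ID_NORMAL):
 *
 *	t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_NORMAL);
 */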

static int t7xx_proxy_alloc(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct port_proxy *port_prox;

	port_prox = devm_kzalloc(dev, sizeof(*port_prox), GFP_KERNEL);
	if (!port_prox)
		return -ENOMEM;

	md->port_prox = port_prox;
	port_prox->dev = dev;
	t7xx_port_proxy_set_cfg(md, PORT_CFG_ID_EARLY);

	return 0;
}

/**
 * t7xx_port_proxy_init() - Initialize ports.
 * @md: Modem.
 *
 * Create all port instances.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from a failed sub-initialization.
 */
int t7xx_port_proxy_init(struct t7xx_modem *md)
{
	int ret;

	ret = t7xx_proxy_alloc(md);
	if (ret)
		return ret;

	t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_AP], t7xx_port_proxy_recv_skb);
	t7xx_cldma_set_recv_skb(md->md_ctrl[CLDMA_ID_MD], t7xx_port_proxy_recv_skb);
	return 0;
}

void t7xx_port_proxy_uninit(struct port_proxy *port_prox)
{
	struct t7xx_port *port;
	int i;

	for_each_proxy_port(i, port, port_prox) {
		const struct t7xx_port_conf *port_conf = port->port_conf;

		if (port_conf->ops->uninit)
			port_conf->ops->uninit(port);
	}
}

int t7xx_port_proxy_chl_enable_disable(struct port_proxy *port_prox, unsigned int ch_id,
				       bool en_flag)
{
	struct t7xx_port *port = t7xx_proxy_get_port_by_ch(port_prox, ch_id);
	const struct t7xx_port_conf *port_conf;

	if (!port)
		return -EINVAL;

	port_conf = port->port_conf;

	if (en_flag) {
		if (port_conf->ops->enable_chl)
			port_conf->ops->enable_chl(port);
	} else {
		if (port_conf->ops->disable_chl)
			port_conf->ops->disable_chl(port);
	}

	return 0;
}