1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Texas Instruments Ethernet Switch Driver ethtool intf 4 * 5 * Copyright (C) 2019 Texas Instruments 6 */ 7 8 #include <linux/if_ether.h> 9 #include <linux/if_vlan.h> 10 #include <linux/kmemleak.h> 11 #include <linux/module.h> 12 #include <linux/netdevice.h> 13 #include <linux/net_tstamp.h> 14 #include <linux/phy.h> 15 #include <linux/pm_runtime.h> 16 #include <linux/skbuff.h> 17 18 #include "cpsw.h" 19 #include "cpts.h" 20 #include "cpsw_ale.h" 21 #include "cpsw_priv.h" 22 #include "davinci_cpdma.h" 23 24 struct cpsw_hw_stats { 25 u32 rxgoodframes; 26 u32 rxbroadcastframes; 27 u32 rxmulticastframes; 28 u32 rxpauseframes; 29 u32 rxcrcerrors; 30 u32 rxaligncodeerrors; 31 u32 rxoversizedframes; 32 u32 rxjabberframes; 33 u32 rxundersizedframes; 34 u32 rxfragments; 35 u32 __pad_0[2]; 36 u32 rxoctets; 37 u32 txgoodframes; 38 u32 txbroadcastframes; 39 u32 txmulticastframes; 40 u32 txpauseframes; 41 u32 txdeferredframes; 42 u32 txcollisionframes; 43 u32 txsinglecollframes; 44 u32 txmultcollframes; 45 u32 txexcessivecollisions; 46 u32 txlatecollisions; 47 u32 txunderrun; 48 u32 txcarriersenseerrors; 49 u32 txoctets; 50 u32 octetframes64; 51 u32 octetframes65t127; 52 u32 octetframes128t255; 53 u32 octetframes256t511; 54 u32 octetframes512t1023; 55 u32 octetframes1024tup; 56 u32 netoctets; 57 u32 rxsofoverruns; 58 u32 rxmofoverruns; 59 u32 rxdmaoverruns; 60 }; 61 62 struct cpsw_stats { 63 char stat_string[ETH_GSTRING_LEN]; 64 int type; 65 int sizeof_stat; 66 int stat_offset; 67 }; 68 69 enum { 70 CPSW_STATS, 71 CPDMA_RX_STATS, 72 CPDMA_TX_STATS, 73 }; 74 75 #define CPSW_STAT(m) CPSW_STATS, \ 76 sizeof_field(struct cpsw_hw_stats, m), \ 77 offsetof(struct cpsw_hw_stats, m) 78 #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ 79 sizeof_field(struct cpdma_chan_stats, m), \ 80 offsetof(struct cpdma_chan_stats, m) 81 #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ 82 sizeof_field(struct cpdma_chan_stats, m), \ 83 offsetof(struct cpdma_chan_stats, m) 84 85 static const struct cpsw_stats cpsw_gstrings_stats[] = { 86 { "Good Rx Frames", CPSW_STAT(rxgoodframes) }, 87 { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) }, 88 { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) }, 89 { "Pause Rx Frames", CPSW_STAT(rxpauseframes) }, 90 { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) }, 91 { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) }, 92 { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) }, 93 { "Rx Jabbers", CPSW_STAT(rxjabberframes) }, 94 { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) }, 95 { "Rx Fragments", CPSW_STAT(rxfragments) }, 96 { "Rx Octets", CPSW_STAT(rxoctets) }, 97 { "Good Tx Frames", CPSW_STAT(txgoodframes) }, 98 { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) }, 99 { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) }, 100 { "Pause Tx Frames", CPSW_STAT(txpauseframes) }, 101 { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) }, 102 { "Collisions", CPSW_STAT(txcollisionframes) }, 103 { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) }, 104 { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) }, 105 { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) }, 106 { "Late Collisions", CPSW_STAT(txlatecollisions) }, 107 { "Tx Underrun", CPSW_STAT(txunderrun) }, 108 { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) }, 109 { "Tx Octets", CPSW_STAT(txoctets) }, 110 { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) }, 111 { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) }, 112 { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) }, 113 { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) }, 114 { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) }, 115 { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) }, 116 { "Net Octets", CPSW_STAT(netoctets) }, 117 { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) }, 118 { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) }, 119 { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) }, 120 }; 121 122 static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { 123 { "head_enqueue", CPDMA_RX_STAT(head_enqueue) }, 124 { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) }, 125 { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) }, 126 { "misqueued", CPDMA_RX_STAT(misqueued) }, 127 { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) }, 128 { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) }, 129 { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) }, 130 { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) }, 131 { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) }, 132 { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) }, 133 { "good_dequeue", CPDMA_RX_STAT(good_dequeue) }, 134 { "requeue", CPDMA_RX_STAT(requeue) }, 135 { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) }, 136 }; 137 138 #define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats) 139 #define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats) 140 141 u32 cpsw_get_msglevel(struct net_device *ndev) 142 { 143 struct cpsw_priv *priv = netdev_priv(ndev); 144 145 return priv->msg_enable; 146 } 147 148 void cpsw_set_msglevel(struct net_device *ndev, u32 value) 149 { 150 struct cpsw_priv *priv = netdev_priv(ndev); 151 152 priv->msg_enable = value; 153 } 154 155 int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) 156 { 157 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 158 159 coal->rx_coalesce_usecs = cpsw->coal_intvl; 160 return 0; 161 } 162 163 int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) 164 { 165 struct cpsw_priv *priv = netdev_priv(ndev); 166 u32 int_ctrl; 167 u32 num_interrupts = 0; 168 u32 prescale = 0; 169 u32 addnl_dvdr = 1; 170 u32 coal_intvl = 0; 171 struct cpsw_common *cpsw = priv->cpsw; 172 173 coal_intvl = coal->rx_coalesce_usecs; 174 175 int_ctrl = readl(&cpsw->wr_regs->int_control); 176 prescale = cpsw->bus_freq_mhz * 4; 177 178 if (!coal->rx_coalesce_usecs) { 179 int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN); 180 goto update_return; 181 } 182 183 if (coal_intvl < CPSW_CMINTMIN_INTVL) 184 coal_intvl = CPSW_CMINTMIN_INTVL; 185 186 if (coal_intvl > CPSW_CMINTMAX_INTVL) { 187 /* Interrupt pacer works with 4us Pulse, we can 188 * throttle further by dilating the 4us pulse. 189 */ 190 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale; 191 192 if (addnl_dvdr > 1) { 193 prescale *= addnl_dvdr; 194 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr)) 195 coal_intvl = (CPSW_CMINTMAX_INTVL 196 * addnl_dvdr); 197 } else { 198 addnl_dvdr = 1; 199 coal_intvl = CPSW_CMINTMAX_INTVL; 200 } 201 } 202 203 num_interrupts = (1000 * addnl_dvdr) / coal_intvl; 204 writel(num_interrupts, &cpsw->wr_regs->rx_imax); 205 writel(num_interrupts, &cpsw->wr_regs->tx_imax); 206 207 int_ctrl |= CPSW_INTPACEEN; 208 int_ctrl &= (~CPSW_INTPRESCALE_MASK); 209 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK); 210 211 update_return: 212 writel(int_ctrl, &cpsw->wr_regs->int_control); 213 214 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl); 215 cpsw->coal_intvl = coal_intvl; 216 217 return 0; 218 } 219 220 int cpsw_get_sset_count(struct net_device *ndev, int sset) 221 { 222 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 223 224 switch (sset) { 225 case ETH_SS_STATS: 226 return (CPSW_STATS_COMMON_LEN + 227 (cpsw->rx_ch_num + cpsw->tx_ch_num) * 228 CPSW_STATS_CH_LEN); 229 default: 230 return -EOPNOTSUPP; 231 } 232 } 233 234 static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir) 235 { 236 int ch_stats_len; 237 int line; 238 int i; 239 240 ch_stats_len = CPSW_STATS_CH_LEN * ch_num; 241 for (i = 0; i < ch_stats_len; i++) { 242 line = i % CPSW_STATS_CH_LEN; 243 snprintf(*p, ETH_GSTRING_LEN, 244 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx", 245 (long)(i / CPSW_STATS_CH_LEN), 246 cpsw_gstrings_ch_stats[line].stat_string); 247 *p += ETH_GSTRING_LEN; 248 } 249 } 250 251 void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 252 { 253 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 254 u8 *p = data; 255 int i; 256 257 switch (stringset) { 258 case ETH_SS_STATS: 259 for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) { 260 memcpy(p, cpsw_gstrings_stats[i].stat_string, 261 ETH_GSTRING_LEN); 262 p += ETH_GSTRING_LEN; 263 } 264 265 cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1); 266 cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0); 267 break; 268 } 269 } 270 271 void cpsw_get_ethtool_stats(struct net_device *ndev, 272 struct ethtool_stats *stats, u64 *data) 273 { 274 u8 *p; 275 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 276 struct cpdma_chan_stats ch_stats; 277 int i, l, ch; 278 279 /* Collect Davinci CPDMA stats for Rx and Tx Channel */ 280 for (l = 0; l < CPSW_STATS_COMMON_LEN; l++) 281 data[l] = readl(cpsw->hw_stats + 282 cpsw_gstrings_stats[l].stat_offset); 283 284 for (ch = 0; ch < cpsw->rx_ch_num; ch++) { 285 cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats); 286 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { 287 p = (u8 *)&ch_stats + 288 cpsw_gstrings_ch_stats[i].stat_offset; 289 data[l] = *(u32 *)p; 290 } 291 } 292 293 for (ch = 0; ch < cpsw->tx_ch_num; ch++) { 294 cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats); 295 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) { 296 p = (u8 *)&ch_stats + 297 cpsw_gstrings_ch_stats[i].stat_offset; 298 data[l] = *(u32 *)p; 299 } 300 } 301 } 302 303 void cpsw_get_pauseparam(struct net_device *ndev, 304 struct ethtool_pauseparam *pause) 305 { 306 struct cpsw_priv *priv = netdev_priv(ndev); 307 308 pause->autoneg = AUTONEG_DISABLE; 309 pause->rx_pause = priv->rx_pause ? true : false; 310 pause->tx_pause = priv->tx_pause ? true : false; 311 } 312 313 void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 314 { 315 struct cpsw_priv *priv = netdev_priv(ndev); 316 struct cpsw_common *cpsw = priv->cpsw; 317 int slave_no = cpsw_slave_index(cpsw, priv); 318 319 wol->supported = 0; 320 wol->wolopts = 0; 321 322 if (cpsw->slaves[slave_no].phy) 323 phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol); 324 } 325 326 int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 327 { 328 struct cpsw_priv *priv = netdev_priv(ndev); 329 struct cpsw_common *cpsw = priv->cpsw; 330 int slave_no = cpsw_slave_index(cpsw, priv); 331 332 if (cpsw->slaves[slave_no].phy) 333 return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol); 334 else 335 return -EOPNOTSUPP; 336 } 337 338 int cpsw_get_regs_len(struct net_device *ndev) 339 { 340 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 341 342 return cpsw_ale_get_num_entries(cpsw->ale) * 343 ALE_ENTRY_WORDS * sizeof(u32); 344 } 345 346 void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) 347 { 348 u32 *reg = p; 349 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 350 351 /* update CPSW IP version */ 352 regs->version = cpsw->version; 353 354 cpsw_ale_dump(cpsw->ale, reg); 355 } 356 357 int cpsw_ethtool_op_begin(struct net_device *ndev) 358 { 359 struct cpsw_priv *priv = netdev_priv(ndev); 360 struct cpsw_common *cpsw = priv->cpsw; 361 int ret; 362 363 ret = pm_runtime_get_sync(cpsw->dev); 364 if (ret < 0) { 365 cpsw_err(priv, drv, "ethtool begin failed %d\n", ret); 366 pm_runtime_put_noidle(cpsw->dev); 367 } 368 369 return ret; 370 } 371 372 void cpsw_ethtool_op_complete(struct net_device *ndev) 373 { 374 struct cpsw_priv *priv = netdev_priv(ndev); 375 int ret; 376 377 ret = pm_runtime_put(priv->cpsw->dev); 378 if (ret < 0) 379 cpsw_err(priv, drv, "ethtool complete failed %d\n", ret); 380 } 381 382 void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch) 383 { 384 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 385 386 ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; 387 ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES; 388 ch->max_combined = 0; 389 ch->max_other = 0; 390 ch->other_count = 0; 391 ch->rx_count = cpsw->rx_ch_num; 392 ch->tx_count = cpsw->tx_ch_num; 393 ch->combined_count = 0; 394 } 395 396 int cpsw_get_link_ksettings(struct net_device *ndev, 397 struct ethtool_link_ksettings *ecmd) 398 { 399 struct cpsw_priv *priv = netdev_priv(ndev); 400 struct cpsw_common *cpsw = priv->cpsw; 401 int slave_no = cpsw_slave_index(cpsw, priv); 402 403 if (!cpsw->slaves[slave_no].phy) 404 return -EOPNOTSUPP; 405 406 phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd); 407 return 0; 408 } 409 410 int cpsw_set_link_ksettings(struct net_device *ndev, 411 const struct ethtool_link_ksettings *ecmd) 412 { 413 struct cpsw_priv *priv = netdev_priv(ndev); 414 struct cpsw_common *cpsw = priv->cpsw; 415 int slave_no = cpsw_slave_index(cpsw, priv); 416 417 if (!cpsw->slaves[slave_no].phy) 418 return -EOPNOTSUPP; 419 420 return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd); 421 } 422 423 int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata) 424 { 425 struct cpsw_priv *priv = netdev_priv(ndev); 426 struct cpsw_common *cpsw = priv->cpsw; 427 int slave_no = cpsw_slave_index(cpsw, priv); 428 429 if (cpsw->slaves[slave_no].phy) 430 return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata); 431 else 432 return -EOPNOTSUPP; 433 } 434 435 int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata) 436 { 437 struct cpsw_priv *priv = netdev_priv(ndev); 438 struct cpsw_common *cpsw = priv->cpsw; 439 int slave_no = cpsw_slave_index(cpsw, priv); 440 441 if (cpsw->slaves[slave_no].phy) 442 return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata); 443 else 444 return -EOPNOTSUPP; 445 } 446 447 int cpsw_nway_reset(struct net_device *ndev) 448 { 449 struct cpsw_priv *priv = netdev_priv(ndev); 450 struct cpsw_common *cpsw = priv->cpsw; 451 int slave_no = cpsw_slave_index(cpsw, priv); 452 453 if (cpsw->slaves[slave_no].phy) 454 return genphy_restart_aneg(cpsw->slaves[slave_no].phy); 455 else 456 return -EOPNOTSUPP; 457 } 458 459 static void cpsw_suspend_data_pass(struct net_device *ndev) 460 { 461 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 462 int i; 463 464 /* Disable NAPI scheduling */ 465 cpsw_intr_disable(cpsw); 466 467 /* Stop all transmit queues for every network device. 468 */ 469 for (i = 0; i < cpsw->data.slaves; i++) { 470 ndev = cpsw->slaves[i].ndev; 471 if (!(ndev && netif_running(ndev))) 472 continue; 473 474 netif_tx_stop_all_queues(ndev); 475 476 /* Barrier, so that stop_queue visible to other cpus */ 477 smp_mb__after_atomic(); 478 } 479 480 /* Handle rest of tx packets and stop cpdma channels */ 481 cpdma_ctlr_stop(cpsw->dma); 482 } 483 484 static int cpsw_resume_data_pass(struct net_device *ndev) 485 { 486 struct cpsw_priv *priv = netdev_priv(ndev); 487 struct cpsw_common *cpsw = priv->cpsw; 488 int i, ret; 489 490 /* After this receive is started */ 491 if (cpsw->usage_count) { 492 ret = cpsw_fill_rx_channels(priv); 493 if (ret) 494 return ret; 495 496 cpdma_ctlr_start(cpsw->dma); 497 cpsw_intr_enable(cpsw); 498 } 499 500 /* Resume transmit for every affected interface */ 501 for (i = 0; i < cpsw->data.slaves; i++) { 502 ndev = cpsw->slaves[i].ndev; 503 if (ndev && netif_running(ndev)) 504 netif_tx_start_all_queues(ndev); 505 } 506 507 return 0; 508 } 509 510 static int cpsw_check_ch_settings(struct cpsw_common *cpsw, 511 struct ethtool_channels *ch) 512 { 513 if (cpsw->quirk_irq) { 514 dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed"); 515 return -EOPNOTSUPP; 516 } 517 518 if (ch->combined_count) 519 return -EINVAL; 520 521 /* verify we have at least one channel in each direction */ 522 if (!ch->rx_count || !ch->tx_count) 523 return -EINVAL; 524 525 if (ch->rx_count > cpsw->data.channels || 526 ch->tx_count > cpsw->data.channels) 527 return -EINVAL; 528 529 return 0; 530 } 531 532 static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx, 533 cpdma_handler_fn rx_handler) 534 { 535 struct cpsw_common *cpsw = priv->cpsw; 536 void (*handler)(void *, int, int); 537 struct netdev_queue *queue; 538 struct cpsw_vector *vec; 539 int ret, *ch, vch; 540 541 if (rx) { 542 ch = &cpsw->rx_ch_num; 543 vec = cpsw->rxv; 544 handler = rx_handler; 545 } else { 546 ch = &cpsw->tx_ch_num; 547 vec = cpsw->txv; 548 handler = cpsw_tx_handler; 549 } 550 551 while (*ch < ch_num) { 552 vch = rx ? *ch : 7 - *ch; 553 vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx); 554 queue = netdev_get_tx_queue(priv->ndev, *ch); 555 queue->tx_maxrate = 0; 556 557 if (IS_ERR(vec[*ch].ch)) 558 return PTR_ERR(vec[*ch].ch); 559 560 if (!vec[*ch].ch) 561 return -EINVAL; 562 563 cpsw_info(priv, ifup, "created new %d %s channel\n", *ch, 564 (rx ? "rx" : "tx")); 565 (*ch)++; 566 } 567 568 while (*ch > ch_num) { 569 (*ch)--; 570 571 ret = cpdma_chan_destroy(vec[*ch].ch); 572 if (ret) 573 return ret; 574 575 cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch, 576 (rx ? "rx" : "tx")); 577 } 578 579 return 0; 580 } 581 582 static void cpsw_fail(struct cpsw_common *cpsw) 583 { 584 struct net_device *ndev; 585 int i; 586 587 for (i = 0; i < cpsw->data.slaves; i++) { 588 ndev = cpsw->slaves[i].ndev; 589 if (ndev) 590 dev_close(ndev); 591 } 592 } 593 594 int cpsw_set_channels_common(struct net_device *ndev, 595 struct ethtool_channels *chs, 596 cpdma_handler_fn rx_handler) 597 { 598 struct cpsw_priv *priv = netdev_priv(ndev); 599 struct cpsw_common *cpsw = priv->cpsw; 600 struct net_device *sl_ndev; 601 int i, new_pools, ret; 602 603 ret = cpsw_check_ch_settings(cpsw, chs); 604 if (ret < 0) 605 return ret; 606 607 cpsw_suspend_data_pass(ndev); 608 609 new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count; 610 611 ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler); 612 if (ret) 613 goto err; 614 615 ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler); 616 if (ret) 617 goto err; 618 619 for (i = 0; i < cpsw->data.slaves; i++) { 620 sl_ndev = cpsw->slaves[i].ndev; 621 if (!(sl_ndev && netif_running(sl_ndev))) 622 continue; 623 624 /* Inform stack about new count of queues */ 625 ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num); 626 if (ret) { 627 dev_err(priv->dev, "cannot set real number of tx queues\n"); 628 goto err; 629 } 630 631 ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num); 632 if (ret) { 633 dev_err(priv->dev, "cannot set real number of rx queues\n"); 634 goto err; 635 } 636 } 637 638 cpsw_split_res(cpsw); 639 640 if (new_pools) { 641 cpsw_destroy_xdp_rxqs(cpsw); 642 ret = cpsw_create_xdp_rxqs(cpsw); 643 if (ret) 644 goto err; 645 } 646 647 ret = cpsw_resume_data_pass(ndev); 648 if (!ret) 649 return 0; 650 err: 651 dev_err(priv->dev, "cannot update channels number, closing device\n"); 652 cpsw_fail(cpsw); 653 return ret; 654 } 655 656 void cpsw_get_ringparam(struct net_device *ndev, 657 struct ethtool_ringparam *ering) 658 { 659 struct cpsw_priv *priv = netdev_priv(ndev); 660 struct cpsw_common *cpsw = priv->cpsw; 661 662 /* not supported */ 663 ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES; 664 ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma); 665 ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES; 666 ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma); 667 } 668 669 int cpsw_set_ringparam(struct net_device *ndev, 670 struct ethtool_ringparam *ering) 671 { 672 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 673 int descs_num, ret; 674 675 /* ignore ering->tx_pending - only rx_pending adjustment is supported */ 676 677 if (ering->rx_mini_pending || ering->rx_jumbo_pending || 678 ering->rx_pending < CPSW_MAX_QUEUES || 679 ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES)) 680 return -EINVAL; 681 682 descs_num = cpdma_get_num_rx_descs(cpsw->dma); 683 if (ering->rx_pending == descs_num) 684 return 0; 685 686 cpsw_suspend_data_pass(ndev); 687 688 ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending); 689 if (ret) { 690 if (cpsw_resume_data_pass(ndev)) 691 goto err; 692 693 return ret; 694 } 695 696 if (cpsw->usage_count) { 697 cpsw_destroy_xdp_rxqs(cpsw); 698 ret = cpsw_create_xdp_rxqs(cpsw); 699 if (ret) 700 goto err; 701 } 702 703 ret = cpsw_resume_data_pass(ndev); 704 if (!ret) 705 return 0; 706 err: 707 cpdma_set_num_rx_descs(cpsw->dma, descs_num); 708 dev_err(cpsw->dev, "cannot set ring params, closing device\n"); 709 cpsw_fail(cpsw); 710 return ret; 711 } 712 713 #if IS_ENABLED(CONFIG_TI_CPTS) 714 int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) 715 { 716 struct cpsw_common *cpsw = ndev_to_cpsw(ndev); 717 718 info->so_timestamping = 719 SOF_TIMESTAMPING_TX_HARDWARE | 720 SOF_TIMESTAMPING_TX_SOFTWARE | 721 SOF_TIMESTAMPING_RX_HARDWARE | 722 SOF_TIMESTAMPING_RX_SOFTWARE | 723 SOF_TIMESTAMPING_SOFTWARE | 724 SOF_TIMESTAMPING_RAW_HARDWARE; 725 info->phc_index = cpsw->cpts->phc_index; 726 info->tx_types = 727 (1 << HWTSTAMP_TX_OFF) | 728 (1 << HWTSTAMP_TX_ON); 729 info->rx_filters = 730 (1 << HWTSTAMP_FILTER_NONE) | 731 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | 732 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); 733 return 0; 734 } 735 #else 736 int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) 737 { 738 info->so_timestamping = 739 SOF_TIMESTAMPING_TX_SOFTWARE | 740 SOF_TIMESTAMPING_RX_SOFTWARE | 741 SOF_TIMESTAMPING_SOFTWARE; 742 info->phc_index = -1; 743 info->tx_types = 0; 744 info->rx_filters = 0; 745 return 0; 746 } 747 #endif 748