/*
 * Copyright(c) 2015 EZchip Technologies.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include "nps_enet.h"

#define DRV_NAME "nps_mgt_enet"

static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 i, len = DIV_ROUND_UP(frame_len, sizeof(u32));

	/* Empty Rx FIFO buffer by reading all words */
	for (i = 0; i < len; i++)
		nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
}

static void nps_enet_read_rx_fifo(struct net_device *ndev,
				  unsigned char *dst, u32 length)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 i, last = length & (sizeof(u32) - 1);
	u32 *reg = (u32 *)dst, len = length / sizeof(u32);
	bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32));

	/* In case dst is not aligned we need an intermediate buffer */
	if (dst_is_aligned) {
		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
		reg += len;
	} else { /* !dst_is_aligned */
		for (i = 0; i < len; i++, reg++) {
			u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);

			put_unaligned_be32(buf, reg);
		}
	}

	/* copy last bytes (if any) */
	if (last) {
		u32 buf;

		ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
		memcpy((u8 *)reg, &buf, last);
	}
}

static u32 nps_enet_rx_handler(struct net_device *ndev)
{
	u32 frame_len, err = 0;
	u32 work_done = 0;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
	u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT;
	u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT;

	frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT;

	/* Check if we got RX */
	if (!rx_ctrl_cr)
		return work_done;

	/* If we got here there is work for us */
	work_done++;

	/* Check Rx error */
	if (rx_ctrl_er) {
		ndev->stats.rx_errors++;
		err = 1;
	}

	/* Check Rx CRC error */
	if (rx_ctrl_crc) {
		ndev->stats.rx_crc_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	/* Check frame length, minimum Ethernet frame size */
	if (unlikely(frame_len < ETH_ZLEN)) {
		ndev->stats.rx_length_errors++;
		ndev->stats.rx_dropped++;
		err = 1;
	}

	if (err)
		goto rx_irq_clean;

	/* Skb allocation */
	skb = netdev_alloc_skb_ip_align(ndev, frame_len);
	if (unlikely(!skb)) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
		goto rx_irq_clean;
	}

	/* Copy frame from Rx fifo into the skb */
	nps_enet_read_rx_fifo(ndev, skb->data, frame_len);

	skb_put(skb, frame_len);
	skb->protocol = eth_type_trans(skb, ndev);
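	/* The driver only gets a CRC status bit from the MAC (rx_ctrl_crc
	 * above); CHECKSUM_UNNECESSARY makes the stack skip software
	 * checksum verification for this frame.
	 */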
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	ndev->stats.rx_packets++;
	ndev->stats.rx_bytes += frame_len;
	netif_receive_skb(skb);

	goto rx_irq_frame_done;

rx_irq_clean:
	/* Clean Rx fifo */
	nps_enet_clean_rx_fifo(ndev, frame_len);

rx_irq_frame_done:
	/* Ack Rx ctrl register */
	nps_enet_reg_set(priv, NPS_ENET_REG_RX_CTL, 0);

	return work_done;
}

static void nps_enet_tx_handler(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
	u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
	u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
	u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;

	/* Check if we got TX */
	if (!priv->tx_skb || tx_ctrl_ct)
		return;

	/* Ack Tx ctrl register */
	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0);

	/* Check Tx transmit error */
	if (unlikely(tx_ctrl_et)) {
		ndev->stats.tx_errors++;
	} else {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += tx_ctrl_nt;
	}

	dev_kfree_skb(priv->tx_skb);
	priv->tx_skb = NULL;

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);
}

/**
 * nps_enet_poll - NAPI poll handler.
 * @napi: Pointer to napi_struct structure.
 * @budget: How many frames to process on one call.
 *
 * returns: Number of processed frames
 */
static int nps_enet_poll(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 work_done;

	nps_enet_tx_handler(ndev);
	work_done = nps_enet_rx_handler(ndev);
	if (work_done < budget) {
		u32 buf_int_enable_value = 0;
		u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
		u32 tx_ctrl_ct =
			(tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;

		napi_complete(napi);

		/* set tx_done and rx_rdy bits */
		buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
		buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;

		nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
				 buf_int_enable_value);

		/* If a Tx interrupt arrives while interrupts are masked we
		 * will lose it, since the Tx interrupt is edge triggered.
		 * Specifically, in the window between nps_enet_tx_handler()
		 * above and the interrupt enable just performed, Tx requests
		 * would be stuck until the next Rx interrupt arrives. The
		 * check below handles this by re-adding ourselves to the
		 * poll list.
		 */

		if (priv->tx_skb && !tx_ctrl_ct)
			napi_reschedule(napi);
	}

	return work_done;
}

/**
 * nps_enet_irq_handler - Global interrupt handler for ENET.
 * @irq: irq number.
 * @dev_instance: device instance.
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * EZchip ENET has two interrupt causes, and from the bits raised in the
 * CTRL registers we can tell why the interrupt fired: one cause is Rx and
 * the other is Tx (completion).
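 * The handler only masks the interrupt enables and schedules NAPI; the
 * actual Rx and Tx processing happens in nps_enet_poll().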
 */
static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
{
	struct net_device *ndev = dev_instance;
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
	u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
	u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
	u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;

	if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
		if (likely(napi_schedule_prep(&priv->napi))) {
			nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
			__napi_schedule(&priv->napi);
		}

	return IRQ_HANDLED;
}

static void nps_enet_set_hw_mac_address(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_mac_cfg_1_value = 0;
	u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;

	/* set MAC address in HW */
	ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT;
	ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT;
	ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT;
	ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT;
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK)
		| ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT;
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK)
		| ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT;

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1,
			 ge_mac_cfg_1_value);

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
			 *ge_mac_cfg_2_value);
}

/**
 * nps_enet_hw_reset - Reset the network device.
 * @ndev: Pointer to the network device.
 *
 * This function resets the PCS and Tx FIFO.
 * The programming model is to set the relevant reset bits, wait for them
 * to propagate, and then clear the reset bits. This way we ensure the
 * reset procedure completes successfully on the device.
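 * No status bit is polled; the fixed usleep_range() delays below are the
 * only wait for the resets to take effect.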
 */
static void nps_enet_hw_reset(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_rst_value = 0, phase_fifo_ctl_value = 0;

	/* PCS reset sequence */
	ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
	usleep_range(10, 20);
	ge_rst_value = 0;
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);

	/* Tx fifo reset sequence */
	phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT;
	phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT;
	nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
			 phase_fifo_ctl_value);
	usleep_range(10, 20);
	phase_fifo_ctl_value = 0;
	nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL,
			 phase_fifo_ctl_value);
}

static void nps_enet_hw_enable_control(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0;
	u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value;
	u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value;
	s32 max_frame_length;

	/* Enable Rx and Tx statistics */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK)
		| NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT;

	/* Discard packets with a different destination MAC address */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
		| NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;

	/* Discard multicast packets */
	*ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
		| NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2,
			 *ge_mac_cfg_2_value);

	/* Discard packets bigger than the maximum frame length */
	max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN;
	if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) {
		*ge_mac_cfg_3_value =
			(*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK)
			| max_frame_length << CFG_3_MAX_LEN_SHIFT;
	}

	/* Enable interrupts */
	buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
	buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT;
	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
			 buf_int_enable_value);

	/* Write device MAC address to HW */
	nps_enet_set_hw_mac_address(ndev);

	/* Rx and Tx HW features */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT;

	/* IFG configuration */
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT;

	/* preamble configuration */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT;

	/* enable flow control frames */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT;
	ge_mac_cfg_0_value |=
		NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT;
	*ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK)
		| NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT;

	/* Enable Rx and Tx */
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT;
	ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT;

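	/* Commit the accumulated CFG_3 and CFG_0 values; CFG_2 was written
	 * above. The CFG_2/CFG_3 shadows kept in priv let later calls such
	 * as nps_enet_set_rx_mode() rebuild the register value without
	 * reading it back from HW.
	 */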
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3,
			 *ge_mac_cfg_3_value);
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0,
			 ge_mac_cfg_0_value);
}

static void nps_enet_hw_disable_control(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	/* Disable interrupts */
	nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);

	/* Disable Rx and Tx */
	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, 0);
}

static void nps_enet_send_frame(struct net_device *ndev,
				struct sk_buff *skb)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 tx_ctrl_value = 0;
	short length = skb->len;
	u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
	u32 *src = (void *)skb->data;
	bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));

	/* In case src is not aligned we need an intermediate buffer */
	if (src_is_aligned)
		iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len);
	else /* !src_is_aligned */
		for (i = 0; i < len; i++, src++)
			nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
					 get_unaligned_be32(src));

	/* Write the length of the Frame */
	tx_ctrl_value |= length << TX_CTL_NT_SHIFT;

	tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
	/* Send Frame */
	nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
}

/**
 * nps_enet_set_mac_address - Set the MAC address for this device.
 * @ndev: Pointer to net_device structure.
 * @p: 6 byte Address to be written as MAC address.
 *
 * This function copies the HW address from the sockaddr structure to the
 * net_device structure and updates the address in HW.
 *
 * returns: -EBUSY if the net device is busy or 0 if the address is set
 *          successfully.
 */
static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	s32 res;

	if (netif_running(ndev))
		return -EBUSY;

	res = eth_mac_addr(ndev, p);
	if (!res) {
		ether_addr_copy(ndev->dev_addr, addr->sa_data);
		nps_enet_set_hw_mac_address(ndev);
	}

	return res;
}

/**
 * nps_enet_set_rx_mode - Change the receive filtering mode.
 * @ndev: Pointer to the network device.
 *
 * This function enables/disables promiscuous mode.
 */
static void nps_enet_set_rx_mode(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value;

	if (ndev->flags & IFF_PROMISC) {
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
			| NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT;
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
			| NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT;
	} else {
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK)
			| NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
		ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
			| NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
	}

	nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
}

/**
 * nps_enet_open - Open the network device.
 * @ndev: Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the ENET device and starts the Tx queue.
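 * If request_irq() fails, the function returns before NAPI or the HW are
 * enabled, so there is nothing to unwind.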
 */
static s32 nps_enet_open(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 err;

	/* Reset private variables */
	priv->tx_skb = NULL;
	priv->ge_mac_cfg_2_value = 0;
	priv->ge_mac_cfg_3_value = 0;

	/* ge_mac_cfg_3 default values */
	priv->ge_mac_cfg_3_value |=
		NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT;

	priv->ge_mac_cfg_3_value |=
		NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT;

	/* Disable HW device */
	nps_enet_hw_disable_control(ndev);

	/* irq Rx allocation */
	err = request_irq(priv->irq, nps_enet_irq_handler,
			  0, "enet-rx-tx", ndev);
	if (err)
		return err;

	napi_enable(&priv->napi);

	/* Enable HW device */
	nps_enet_hw_reset(ndev);
	nps_enet_hw_enable_control(ndev);

	netif_start_queue(ndev);

	return 0;
}

/**
 * nps_enet_stop - Close the network device.
 * @ndev: Pointer to the network device.
 *
 * This function stops the Tx queue and disables interrupts for the ENET
 * device.
 */
static s32 nps_enet_stop(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	napi_disable(&priv->napi);
	netif_stop_queue(ndev);
	nps_enet_hw_disable_control(ndev);
	free_irq(priv->irq, ndev);

	return 0;
}

/**
 * nps_enet_start_xmit - Starts the data transmission.
 * @skb: sk_buff pointer that contains data to be Transmitted.
 * @ndev: Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *          NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);

	/* This driver handles one frame at a time */
	netif_stop_queue(ndev);

	priv->tx_skb = skb;

	/* make sure tx_skb is actually written to memory
	 * before the HW is informed and the IRQ is fired.
	 */
	wmb();

	nps_enet_send_frame(ndev, skb);

	return NETDEV_TX_OK;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void nps_enet_poll_controller(struct net_device *ndev)
{
	disable_irq(ndev->irq);
	nps_enet_irq_handler(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif

static const struct net_device_ops nps_netdev_ops = {
	.ndo_open = nps_enet_open,
	.ndo_stop = nps_enet_stop,
	.ndo_start_xmit = nps_enet_start_xmit,
	.ndo_set_mac_address = nps_enet_set_mac_address,
	.ndo_set_rx_mode = nps_enet_set_rx_mode,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = nps_enet_poll_controller,
#endif
};

static s32 nps_enet_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct nps_enet_priv *priv;
	s32 err = 0;
	const char *mac_addr;
	struct resource *res_regs;

	if (!dev->of_node)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(struct nps_enet_priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, dev);
	priv = netdev_priv(ndev);

	/* The EZ NET specific entries in the device structure. */
	ndev->netdev_ops = &nps_netdev_ops;
	ndev->watchdog_timeo = (400 * HZ / 1000);
	/* FIXME :: no multicast support yet */
	ndev->flags &= ~IFF_MULTICAST;

	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs_base = devm_ioremap_resource(dev, res_regs);
	if (IS_ERR(priv->regs_base)) {
		err = PTR_ERR(priv->regs_base);
		goto out_netdev;
	}
	dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);

	/* set kernel MAC address to dev */
	mac_addr = of_get_mac_address(dev->of_node);
	if (mac_addr)
		ether_addr_copy(ndev->dev_addr, mac_addr);
	else
		eth_hw_addr_random(ndev);

	/* Get IRQ number */
	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {
		dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
		err = -ENODEV;
		goto out_netdev;
	}

	netif_napi_add(ndev, &priv->napi, nps_enet_poll,
		       NPS_ENET_NAPI_POLL_WEIGHT);

	/* Register the driver. Should be the last thing in probe */
	err = register_netdev(ndev);
	if (err) {
		dev_err(dev, "Failed to register ndev for %s, err = 0x%08x\n",
			ndev->name, (s32)err);
		goto out_netif_api;
	}

	dev_info(dev, "(rx/tx=%d)\n", priv->irq);
	return 0;

out_netif_api:
	netif_napi_del(&priv->napi);
out_netdev:
	if (err)
		free_netdev(ndev);

	return err;
}

static s32 nps_enet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nps_enet_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);
	free_netdev(ndev);

	return 0;
}

static const struct of_device_id nps_enet_dt_ids[] = {
	{ .compatible = "ezchip,nps-mgt-enet" },
	{ /* Sentinel */ }
};

static struct platform_driver nps_enet_driver = {
	.probe = nps_enet_probe,
	.remove = nps_enet_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = nps_enet_dt_ids,
	},
};

module_platform_driver(nps_enet_driver);

MODULE_AUTHOR("EZchip Semiconductor");
MODULE_LICENSE("GPL v2");
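/* A module description is customary next to MODULE_AUTHOR/MODULE_LICENSE;
 * the wording below is only a suggested placeholder.
 */
MODULE_DESCRIPTION("EZchip NPS management Ethernet driver");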