1 /* 2 * drivers/net/ethernet/beckhoff/ec_bhf.c 3 * 4 * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> 5 * 6 * This software is licensed under the terms of the GNU General Public 7 * License version 2, as published by the Free Software Foundation, and 8 * may be copied, distributed, and modified under those terms. 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * GNU General Public License for more details. 14 * 15 */ 16 17 /* This is a driver for EtherCAT master module present on CCAT FPGA. 18 * Those can be found on Bechhoff CX50xx industrial PCs. 19 */ 20 21 #if 0 22 #define DEBUG 23 #endif 24 #include <linux/kernel.h> 25 #include <linux/module.h> 26 #include <linux/moduleparam.h> 27 #include <linux/pci.h> 28 #include <linux/init.h> 29 30 #include <linux/netdevice.h> 31 #include <linux/etherdevice.h> 32 #include <linux/ip.h> 33 #include <linux/skbuff.h> 34 #include <linux/hrtimer.h> 35 #include <linux/interrupt.h> 36 #include <linux/stat.h> 37 38 #define TIMER_INTERVAL_NSEC 20000 39 40 #define INFO_BLOCK_SIZE 0x10 41 #define INFO_BLOCK_TYPE 0x0 42 #define INFO_BLOCK_REV 0x2 43 #define INFO_BLOCK_BLK_CNT 0x4 44 #define INFO_BLOCK_TX_CHAN 0x4 45 #define INFO_BLOCK_RX_CHAN 0x5 46 #define INFO_BLOCK_OFFSET 0x8 47 48 #define EC_MII_OFFSET 0x4 49 #define EC_FIFO_OFFSET 0x8 50 #define EC_MAC_OFFSET 0xc 51 52 #define MAC_FRAME_ERR_CNT 0x0 53 #define MAC_RX_ERR_CNT 0x1 54 #define MAC_CRC_ERR_CNT 0x2 55 #define MAC_LNK_LST_ERR_CNT 0x3 56 #define MAC_TX_FRAME_CNT 0x10 57 #define MAC_RX_FRAME_CNT 0x14 58 #define MAC_TX_FIFO_LVL 0x20 59 #define MAC_DROPPED_FRMS 0x28 60 #define MAC_CONNECTED_CCAT_FLAG 0x78 61 62 #define MII_MAC_ADDR 0x8 63 #define MII_MAC_FILT_FLAG 0xe 64 #define MII_LINK_STATUS 0xf 65 66 #define FIFO_TX_REG 0x0 67 #define FIFO_TX_RESET 0x8 68 #define FIFO_RX_REG 0x10 69 #define FIFO_RX_ADDR_VALID (1u << 31) 70 #define FIFO_RX_RESET 0x18 71 72 #define DMA_CHAN_OFFSET 0x1000 73 #define DMA_CHAN_SIZE 0x8 74 75 #define DMA_WINDOW_SIZE_MASK 0xfffffffc 76 77 static struct pci_device_id ids[] = { 78 { PCI_DEVICE(0x15ec, 0x5000), }, 79 { 0, } 80 }; 81 MODULE_DEVICE_TABLE(pci, ids); 82 83 struct rx_header { 84 #define RXHDR_NEXT_ADDR_MASK 0xffffffu 85 #define RXHDR_NEXT_VALID (1u << 31) 86 __le32 next; 87 #define RXHDR_NEXT_RECV_FLAG 0x1 88 __le32 recv; 89 #define RXHDR_LEN_MASK 0xfffu 90 __le16 len; 91 __le16 port; 92 __le32 reserved; 93 u8 timestamp[8]; 94 } __packed; 95 96 #define PKT_PAYLOAD_SIZE 0x7e8 97 struct rx_desc { 98 struct rx_header header; 99 u8 data[PKT_PAYLOAD_SIZE]; 100 } __packed; 101 102 struct tx_header { 103 __le16 len; 104 #define TX_HDR_PORT_0 0x1 105 #define TX_HDR_PORT_1 0x2 106 u8 port; 107 u8 ts_enable; 108 #define TX_HDR_SENT 0x1 109 __le32 sent; 110 u8 timestamp[8]; 111 } __packed; 112 113 struct tx_desc { 114 struct tx_header header; 115 u8 data[PKT_PAYLOAD_SIZE]; 116 } __packed; 117 118 #define FIFO_SIZE 64 119 120 static long polling_frequency = TIMER_INTERVAL_NSEC; 121 122 struct bhf_dma { 123 u8 *buf; 124 size_t len; 125 dma_addr_t buf_phys; 126 127 u8 *alloc; 128 size_t alloc_len; 129 dma_addr_t alloc_phys; 130 }; 131 132 struct ec_bhf_priv { 133 struct net_device *net_dev; 134 135 struct pci_dev *dev; 136 137 void __iomem *io; 138 void __iomem *dma_io; 139 140 struct hrtimer hrtimer; 141 142 int tx_dma_chan; 143 int rx_dma_chan; 144 void __iomem *ec_io; 145 void __iomem *fifo_io; 146 void __iomem *mii_io; 147 void __iomem *mac_io; 148 149 struct bhf_dma rx_buf; 150 struct rx_desc *rx_descs; 151 int rx_dnext; 152 int rx_dcount; 153 154 struct bhf_dma tx_buf; 155 struct tx_desc *tx_descs; 156 int tx_dcount; 157 int tx_dnext; 158 159 u64 stat_rx_bytes; 160 u64 stat_tx_bytes; 161 }; 162 163 #define PRIV_TO_DEV(priv) (&(priv)->dev->dev) 164 165 #define ETHERCAT_MASTER_ID 0x14 166 167 static void ec_bhf_print_status(struct ec_bhf_priv *priv) 168 { 169 struct device *dev = PRIV_TO_DEV(priv); 170 171 dev_dbg(dev, "Frame error counter: %d\n", 172 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); 173 dev_dbg(dev, "RX error counter: %d\n", 174 ioread8(priv->mac_io + MAC_RX_ERR_CNT)); 175 dev_dbg(dev, "CRC error counter: %d\n", 176 ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); 177 dev_dbg(dev, "TX frame counter: %d\n", 178 ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); 179 dev_dbg(dev, "RX frame counter: %d\n", 180 ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); 181 dev_dbg(dev, "TX fifo level: %d\n", 182 ioread8(priv->mac_io + MAC_TX_FIFO_LVL)); 183 dev_dbg(dev, "Dropped frames: %d\n", 184 ioread8(priv->mac_io + MAC_DROPPED_FRMS)); 185 dev_dbg(dev, "Connected with CCAT slot: %d\n", 186 ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); 187 dev_dbg(dev, "Link status: %d\n", 188 ioread8(priv->mii_io + MII_LINK_STATUS)); 189 } 190 191 static void ec_bhf_reset(struct ec_bhf_priv *priv) 192 { 193 iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); 194 iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT); 195 iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT); 196 iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT); 197 iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT); 198 iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT); 199 iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS); 200 201 iowrite8(0, priv->fifo_io + FIFO_TX_RESET); 202 iowrite8(0, priv->fifo_io + FIFO_RX_RESET); 203 204 iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL); 205 } 206 207 static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) 208 { 209 u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header); 210 u32 addr = (u8 *)desc - priv->tx_buf.buf; 211 212 iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); 213 214 dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); 215 } 216 217 static int ec_bhf_desc_sent(struct tx_desc *desc) 218 { 219 return le32_to_cpu(desc->header.sent) & TX_HDR_SENT; 220 } 221 222 static void ec_bhf_process_tx(struct ec_bhf_priv *priv) 223 { 224 if (unlikely(netif_queue_stopped(priv->net_dev))) { 225 /* Make sure that we perceive changes to tx_dnext. */ 226 smp_rmb(); 227 228 if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) 229 netif_wake_queue(priv->net_dev); 230 } 231 } 232 233 static int ec_bhf_pkt_received(struct rx_desc *desc) 234 { 235 return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG; 236 } 237 238 static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) 239 { 240 iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf), 241 priv->fifo_io + FIFO_RX_REG); 242 } 243 244 static void ec_bhf_process_rx(struct ec_bhf_priv *priv) 245 { 246 struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; 247 struct device *dev = PRIV_TO_DEV(priv); 248 249 while (ec_bhf_pkt_received(desc)) { 250 int pkt_size = (le16_to_cpu(desc->header.len) & 251 RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4; 252 u8 *data = desc->data; 253 struct sk_buff *skb; 254 255 skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); 256 dev_dbg(dev, "Received packet, size: %d\n", pkt_size); 257 258 if (skb) { 259 memcpy(skb_put(skb, pkt_size), data, pkt_size); 260 skb->protocol = eth_type_trans(skb, priv->net_dev); 261 dev_dbg(dev, "Protocol type: %x\n", skb->protocol); 262 263 priv->stat_rx_bytes += pkt_size; 264 265 netif_rx(skb); 266 } else { 267 dev_err_ratelimited(dev, 268 "Couldn't allocate a skb_buff for a packet of size %u\n", 269 pkt_size); 270 } 271 272 desc->header.recv = 0; 273 274 ec_bhf_add_rx_desc(priv, desc); 275 276 priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; 277 desc = &priv->rx_descs[priv->rx_dnext]; 278 } 279 280 } 281 282 static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) 283 { 284 struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv, 285 hrtimer); 286 ec_bhf_process_rx(priv); 287 ec_bhf_process_tx(priv); 288 289 if (!netif_running(priv->net_dev)) 290 return HRTIMER_NORESTART; 291 292 hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); 293 return HRTIMER_RESTART; 294 } 295 296 static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) 297 { 298 struct device *dev = PRIV_TO_DEV(priv); 299 unsigned block_count, i; 300 void __iomem *ec_info; 301 302 dev_dbg(dev, "Info block:\n"); 303 dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); 304 dev_dbg(dev, "Revision of function: %x\n", 305 (unsigned)ioread16(priv->io + INFO_BLOCK_REV)); 306 307 block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); 308 dev_dbg(dev, "Number of function blocks: %x\n", block_count); 309 310 for (i = 0; i < block_count; i++) { 311 u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + 312 INFO_BLOCK_TYPE); 313 if (type == ETHERCAT_MASTER_ID) 314 break; 315 } 316 if (i == block_count) { 317 dev_err(dev, "EtherCAT master with DMA block not found\n"); 318 return -ENODEV; 319 } 320 dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); 321 322 ec_info = priv->io + i * INFO_BLOCK_SIZE; 323 dev_dbg(dev, "EtherCAT master revision: %d\n", 324 ioread16(ec_info + INFO_BLOCK_REV)); 325 326 priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); 327 dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", 328 priv->tx_dma_chan); 329 330 priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); 331 dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", 332 priv->rx_dma_chan); 333 334 priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); 335 priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); 336 priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); 337 priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); 338 339 dev_dbg(dev, 340 "EtherCAT block addres: %p, fifo address: %p, mii address: %p, mac address: %p\n", 341 priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); 342 343 return 0; 344 } 345 346 static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, 347 struct net_device *net_dev) 348 { 349 struct ec_bhf_priv *priv = netdev_priv(net_dev); 350 struct tx_desc *desc; 351 unsigned len; 352 353 dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); 354 355 desc = &priv->tx_descs[priv->tx_dnext]; 356 357 skb_copy_and_csum_dev(skb, desc->data); 358 len = skb->len; 359 360 memset(&desc->header, 0, sizeof(desc->header)); 361 desc->header.len = cpu_to_le16(len); 362 desc->header.port = TX_HDR_PORT_0; 363 364 ec_bhf_send_packet(priv, desc); 365 366 priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; 367 368 if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { 369 /* Make sure that update updates to tx_dnext are perceived 370 * by timer routine. 371 */ 372 smp_wmb(); 373 374 netif_stop_queue(net_dev); 375 376 dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); 377 ec_bhf_print_status(priv); 378 } 379 380 priv->stat_tx_bytes += len; 381 382 dev_kfree_skb(skb); 383 384 return NETDEV_TX_OK; 385 } 386 387 static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, 388 struct bhf_dma *buf, 389 int channel, 390 int size) 391 { 392 int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET; 393 struct device *dev = PRIV_TO_DEV(priv); 394 u32 mask; 395 396 iowrite32(0xffffffff, priv->dma_io + offset); 397 398 mask = ioread32(priv->dma_io + offset); 399 mask &= DMA_WINDOW_SIZE_MASK; 400 dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); 401 402 /* We want to allocate a chunk of memory that is: 403 * - aligned to the mask we just read 404 * - is of size 2^mask bytes (at most) 405 * In order to ensure that we will allocate buffer of 406 * 2 * 2^mask bytes. 407 */ 408 buf->len = min_t(int, ~mask + 1, size); 409 buf->alloc_len = 2 * buf->len; 410 411 dev_dbg(dev, "Allocating %d bytes for channel %d", 412 (int)buf->alloc_len, channel); 413 buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, 414 GFP_KERNEL); 415 if (buf->alloc == NULL) { 416 dev_info(dev, "Failed to allocate buffer\n"); 417 return -ENOMEM; 418 } 419 420 buf->buf_phys = (buf->alloc_phys + buf->len) & mask; 421 buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys); 422 423 iowrite32(0, priv->dma_io + offset + 4); 424 iowrite32(buf->buf_phys, priv->dma_io + offset); 425 dev_dbg(dev, "Buffer: %x and read from dev: %x", 426 (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); 427 428 return 0; 429 } 430 431 static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) 432 { 433 int i = 0; 434 435 priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); 436 priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; 437 priv->tx_dnext = 0; 438 439 for (i = 0; i < priv->tx_dcount; i++) 440 priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT); 441 } 442 443 static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) 444 { 445 int i; 446 447 priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); 448 priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; 449 priv->rx_dnext = 0; 450 451 for (i = 0; i < priv->rx_dcount; i++) { 452 struct rx_desc *desc = &priv->rx_descs[i]; 453 u32 next; 454 455 if (i != priv->rx_dcount - 1) 456 next = (u8 *)(desc + 1) - priv->rx_buf.buf; 457 else 458 next = 0; 459 next |= RXHDR_NEXT_VALID; 460 desc->header.next = cpu_to_le32(next); 461 desc->header.recv = 0; 462 ec_bhf_add_rx_desc(priv, desc); 463 } 464 } 465 466 static int ec_bhf_open(struct net_device *net_dev) 467 { 468 struct ec_bhf_priv *priv = netdev_priv(net_dev); 469 struct device *dev = PRIV_TO_DEV(priv); 470 int err = 0; 471 472 dev_info(dev, "Opening device\n"); 473 474 ec_bhf_reset(priv); 475 476 err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, 477 FIFO_SIZE * sizeof(struct rx_desc)); 478 if (err) { 479 dev_err(dev, "Failed to allocate rx buffer\n"); 480 goto out; 481 } 482 ec_bhf_setup_rx_descs(priv); 483 484 dev_info(dev, "RX buffer allocated, address: %x\n", 485 (unsigned)priv->rx_buf.buf_phys); 486 487 err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, 488 FIFO_SIZE * sizeof(struct tx_desc)); 489 if (err) { 490 dev_err(dev, "Failed to allocate tx buffer\n"); 491 goto error_rx_free; 492 } 493 dev_dbg(dev, "TX buffer allocated, addres: %x\n", 494 (unsigned)priv->tx_buf.buf_phys); 495 496 iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); 497 498 ec_bhf_setup_tx_descs(priv); 499 500 netif_start_queue(net_dev); 501 502 hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 503 priv->hrtimer.function = ec_bhf_timer_fun; 504 hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), 505 HRTIMER_MODE_REL); 506 507 dev_info(PRIV_TO_DEV(priv), "Device open\n"); 508 509 ec_bhf_print_status(priv); 510 511 return 0; 512 513 error_rx_free: 514 dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc, 515 priv->rx_buf.alloc_len); 516 out: 517 return err; 518 } 519 520 static int ec_bhf_stop(struct net_device *net_dev) 521 { 522 struct ec_bhf_priv *priv = netdev_priv(net_dev); 523 struct device *dev = PRIV_TO_DEV(priv); 524 525 hrtimer_cancel(&priv->hrtimer); 526 527 ec_bhf_reset(priv); 528 529 netif_tx_disable(net_dev); 530 531 dma_free_coherent(dev, priv->tx_buf.alloc_len, 532 priv->tx_buf.alloc, priv->tx_buf.alloc_phys); 533 dma_free_coherent(dev, priv->rx_buf.alloc_len, 534 priv->rx_buf.alloc, priv->rx_buf.alloc_phys); 535 536 return 0; 537 } 538 539 static struct rtnl_link_stats64 * 540 ec_bhf_get_stats(struct net_device *net_dev, 541 struct rtnl_link_stats64 *stats) 542 { 543 struct ec_bhf_priv *priv = netdev_priv(net_dev); 544 545 stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) + 546 ioread8(priv->mac_io + MAC_CRC_ERR_CNT) + 547 ioread8(priv->mac_io + MAC_FRAME_ERR_CNT); 548 stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT); 549 stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT); 550 stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS); 551 552 stats->tx_bytes = priv->stat_tx_bytes; 553 stats->rx_bytes = priv->stat_rx_bytes; 554 555 return stats; 556 } 557 558 static const struct net_device_ops ec_bhf_netdev_ops = { 559 .ndo_start_xmit = ec_bhf_start_xmit, 560 .ndo_open = ec_bhf_open, 561 .ndo_stop = ec_bhf_stop, 562 .ndo_get_stats64 = ec_bhf_get_stats, 563 .ndo_change_mtu = eth_change_mtu, 564 .ndo_validate_addr = eth_validate_addr, 565 .ndo_set_mac_address = eth_mac_addr 566 }; 567 568 static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) 569 { 570 struct net_device *net_dev; 571 struct ec_bhf_priv *priv; 572 void __iomem *dma_io; 573 void __iomem *io; 574 int err = 0; 575 576 err = pci_enable_device(dev); 577 if (err) 578 return err; 579 580 pci_set_master(dev); 581 582 err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); 583 if (err) { 584 dev_err(&dev->dev, 585 "Required dma mask not supported, failed to initialize device\n"); 586 err = -EIO; 587 goto err_disable_dev; 588 } 589 590 err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); 591 if (err) { 592 dev_err(&dev->dev, 593 "Required dma mask not supported, failed to initialize device\n"); 594 goto err_disable_dev; 595 } 596 597 err = pci_request_regions(dev, "ec_bhf"); 598 if (err) { 599 dev_err(&dev->dev, "Failed to request pci memory regions\n"); 600 goto err_disable_dev; 601 } 602 603 io = pci_iomap(dev, 0, 0); 604 if (!io) { 605 dev_err(&dev->dev, "Failed to map pci card memory bar 0"); 606 err = -EIO; 607 goto err_release_regions; 608 } 609 610 dma_io = pci_iomap(dev, 2, 0); 611 if (!dma_io) { 612 dev_err(&dev->dev, "Failed to map pci card memory bar 2"); 613 err = -EIO; 614 goto err_unmap; 615 } 616 617 net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv)); 618 if (net_dev == NULL) { 619 err = -ENOMEM; 620 goto err_unmap_dma_io; 621 } 622 623 pci_set_drvdata(dev, net_dev); 624 SET_NETDEV_DEV(net_dev, &dev->dev); 625 626 net_dev->features = 0; 627 net_dev->flags |= IFF_NOARP; 628 629 net_dev->netdev_ops = &ec_bhf_netdev_ops; 630 631 priv = netdev_priv(net_dev); 632 priv->net_dev = net_dev; 633 priv->io = io; 634 priv->dma_io = dma_io; 635 priv->dev = dev; 636 637 err = ec_bhf_setup_offsets(priv); 638 if (err < 0) 639 goto err_free_net_dev; 640 641 memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); 642 643 dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", 644 net_dev->dev_addr); 645 646 err = register_netdev(net_dev); 647 if (err < 0) 648 goto err_free_net_dev; 649 650 return 0; 651 652 err_free_net_dev: 653 free_netdev(net_dev); 654 err_unmap_dma_io: 655 pci_iounmap(dev, dma_io); 656 err_unmap: 657 pci_iounmap(dev, io); 658 err_release_regions: 659 pci_release_regions(dev); 660 err_disable_dev: 661 pci_clear_master(dev); 662 pci_disable_device(dev); 663 664 return err; 665 } 666 667 static void ec_bhf_remove(struct pci_dev *dev) 668 { 669 struct net_device *net_dev = pci_get_drvdata(dev); 670 struct ec_bhf_priv *priv = netdev_priv(net_dev); 671 672 unregister_netdev(net_dev); 673 free_netdev(net_dev); 674 675 pci_iounmap(dev, priv->dma_io); 676 pci_iounmap(dev, priv->io); 677 pci_release_regions(dev); 678 pci_clear_master(dev); 679 pci_disable_device(dev); 680 } 681 682 static struct pci_driver pci_driver = { 683 .name = "ec_bhf", 684 .id_table = ids, 685 .probe = ec_bhf_probe, 686 .remove = ec_bhf_remove, 687 }; 688 689 static int __init ec_bhf_init(void) 690 { 691 return pci_register_driver(&pci_driver); 692 } 693 694 static void __exit ec_bhf_exit(void) 695 { 696 pci_unregister_driver(&pci_driver); 697 } 698 699 module_init(ec_bhf_init); 700 module_exit(ec_bhf_exit); 701 702 module_param(polling_frequency, long, S_IRUGO); 703 MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); 704 705 MODULE_LICENSE("GPL"); 706 MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>"); 707