/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>

#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"

struct vnic_res {
	void __iomem *vaddr;
	unsigned int count;
};

struct vnic_dev {
	void *priv;
	struct pci_dev *pdev;
	struct vnic_res res[RES_TYPE_MAX];
	enum vnic_dev_intr_mode intr_mode;
	struct vnic_devcmd __iomem *devcmd;
	struct vnic_devcmd_notify *notify;
	struct vnic_devcmd_notify notify_copy;
	dma_addr_t notify_pa;
	u32 *linkstatus;
	dma_addr_t linkstatus_pa;
	struct vnic_stats *stats;
	dma_addr_t stats_pa;
	struct vnic_devcmd_fw_info *fw_info;
	dma_addr_t fw_info_pa;
};

#define VNIC_MAX_RES_HDR_SIZE \
	(sizeof(struct vnic_resource_header) + \
	sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE	128

void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}

/* Walk the vNIC resource directory at the start of BAR0 and record the
 * count and mapped address of each resource type this driver uses.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar)
{
	struct vnic_resource_header __iomem *rh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	if (!rh) {
		printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
	    ioread32(&rh->version) != VNIC_RES_VERSION) {
		printk(KERN_ERR "vNIC BAR0 res magic/version error "
			"exp (%lx/%lx) curr (%x/%x)\n",
			VNIC_RES_MAGIC, VNIC_RES_VERSION,
			ioread32(&rh->magic), ioread32(&rh->version));
		return -EINVAL;
	}

	r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num != 0)	/* only mapping in BAR0 resources */
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			if (len + bar_offset > bar->len) {
				printk(KERN_ERR "vNIC BAR0 resource %d "
					"out-of-bounds, offset 0x%x + "
					"size 0x%x > bar len 0x%lx\n",
					type, bar_offset,
					len,
					bar->len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
	}

	return 0;
}
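
/*
 * Illustrative sketch (assumed usage, not code in this file): after
 * vnic_dev_discover_res() has filled in vdev->res[], a caller typically
 * sizes its queues from the per-type counts and maps each instance with
 * the accessors below.  wq_count and intr_ctrl are hypothetical locals
 * used only for this example.
 *
 *	unsigned int wq_count;
 *	void __iomem *intr_ctrl;
 *
 *	wq_count = vnic_dev_get_res_count(vdev, RES_TYPE_WQ);
 *	intr_ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, 0);
 *	if (!intr_ctrl)
 *		return -ENODEV;
 */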

unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}

void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
	unsigned int index)
{
	if (!vdev->res[type].vaddr)
		return NULL;

	switch (type) {
	case RES_TYPE_WQ:
	case RES_TYPE_RQ:
	case RES_TYPE_CQ:
	case RES_TYPE_INTR_CTRL:
		return (char __iomem *)vdev->res[type].vaddr +
			index * VNIC_RES_STRIDE;
	default:
		return (char __iomem *)vdev->res[type].vaddr;
	}
}

unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	/* The base address of the desc rings must be 512 byte aligned.
	 * Descriptor count is aligned to groups of 32 descriptors.  A
	 * count of 0 means the maximum 4096 descriptors.  Descriptor
	 * size is aligned to 16 bytes.
	 */

	unsigned int count_align = 32;
	unsigned int desc_align = 16;

	ring->base_align = 512;

	if (desc_count == 0)
		desc_count = 4096;

	ring->desc_count = ALIGN(desc_count, count_align);

	ring->desc_size = ALIGN(desc_size, desc_align);

	ring->size = ring->desc_count * ring->desc_size;
	ring->size_unaligned = ring->size + ring->base_align;

	return ring->size_unaligned;
}

void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}

int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR
			"Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	/* Keep one descriptor in reserve so a completely full ring can be
	 * distinguished from an empty one.
	 */
	ring->desc_avail = ring->desc_count - 1;

	return 0;
}

void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
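
/*
 * Illustrative sketch (assumed usage, not code in this file): a queue
 * setup path sizes and allocates its descriptor ring with the helpers
 * above and releases it on teardown.  The count of 256 and the 16-byte
 * descriptor size are hypothetical values chosen only for this example.
 *
 *	struct vnic_dev_ring ring;
 *	int err;
 *
 *	err = vnic_dev_alloc_desc_ring(vdev, &ring, 256, 16);
 *	if (err)
 *		return err;
 *
 *	... post descriptors, run the queue ...
 *
 *	vnic_dev_free_desc_ring(vdev, &ring);
 */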

/* Issue a device command through the BAR0 devcmd region.  Unless the
 * command is flagged no-wait, busy-poll for completion for up to
 * wait iterations of 100 usec each.
 */
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	int delay;
	u32 status;
	int dev_cmd_err[] = {
		/* convert from fw's version of error.h to host's version */
		0,	/* ERR_SUCCESS */
		EINVAL,	/* ERR_EINVAL */
		EFAULT,	/* ERR_EFAULT */
		EPERM,	/* ERR_EPERM */
		EBUSY,	/* ERR_EBUSY */
	};
	int err;

	status = ioread32(&devcmd->status);
	if (status & STAT_BUSY) {
		printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		writeq(*a0, &devcmd->args[0]);
		writeq(*a1, &devcmd->args[1]);
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = dev_cmd_err[(int)readq(&devcmd->args[0])];
				printk(KERN_ERR "Error %d devcmd %d\n",
					err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb();
				*a0 = readq(&devcmd->args[0]);
				*a1 = readq(&devcmd->args[1]);
			}

			return 0;
		}
	}

	printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
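
/*
 * Note on the devcmd calling convention used by the wrappers below
 * (illustrative sketch, not code in this file): inputs go out in a0/a1,
 * results come back in the same variables, and "wait" bounds the
 * busy-poll above at roughly wait * 100 usec.  For example,
 * vnic_dev_open_done() further down issues CMD_OPEN_STATUS this way:
 *
 *	u64 a0 = 0, a1 = 0;
 *	int wait = 1000;
 *	int err;
 *
 *	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
 *	if (!err)
 *		done = (a0 == 0);
 */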

int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;

		/* only get fw_info once and cache it */
		err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}

int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1:
		*(u8 *)value = (u8)a0;
		break;
	case 2:
		*(u16 *)value = (u16)a0;
		break;
	case 4:
		*(u32 *)value = (u32)a0;
		break;
	case 8:
		*(u64 *)value = a0;
		break;
	default:
		BUG();
		break;
	}

	return err;
}

int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}

int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}

int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}

int vnic_dev_enable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}

int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}

int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}

int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}

int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}

int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}

int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
	u64 a0, a1;
	int wait = 1000;
	int err, i;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = 0;

	err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait);
	if (err)
		return err;

	for (i = 0; i < ETH_ALEN; i++)
		mac_addr[i] = ((u8 *)&a0)[i];

	return 0;
}

void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR "Can't set packet filter\n");
}

void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR
			"Can't add addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);
}

void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		printk(KERN_ERR
			"Can't del addr [%02x:%02x:%02x:%02x:%02x:%02x], %d\n",
			addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
			err);
}

int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->notify) {
		vdev->notify = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&vdev->notify_pa);
		if (!vdev->notify)
			return -ENOMEM;
	}

	a0 = vdev->notify_pa;

	/* interrupt number in bits 47:32, notify buffer size in the low bits */
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}

/* Snapshot the firmware notify block into notify_copy, retrying until the
 * copy is self-consistent: word 0 holds the checksum of the remaining
 * words.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify,
			sizeof(struct vnic_devcmd_notify));
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
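
/*
 * Illustrative sketch (assumed usage, not code in this file): once
 * vnic_dev_notify_set() has pointed firmware at the notify buffer, a
 * periodic service routine can read the checksum-validated copy through
 * the helpers below.  netdev and speed stand for the caller's own
 * net_device and local variable.
 *
 *	if (vnic_dev_link_status(vdev))
 *		netif_carrier_on(netdev);
 *	else
 *		netif_carrier_off(netdev);
 *
 *	speed = vnic_dev_port_speed(vdev);
 */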

int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}

int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (vdev->linkstatus)
		return *vdev->linkstatus;

	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}

u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}

u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}

u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}

u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_down_cnt;
}

void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}

enum vnic_dev_intr_mode vnic_dev_get_intr_mode(struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}

void vnic_dev_unregister(struct vnic_dev *vdev)
{
	if (vdev) {
		if (vdev->notify)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_notify),
				vdev->notify,
				vdev->notify_pa);
		if (vdev->linkstatus)
			pci_free_consistent(vdev->pdev,
				sizeof(u32),
				vdev->linkstatus,
				vdev->linkstatus_pa);
		if (vdev->stats)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_stats),
				vdev->stats, vdev->stats_pa);
		if (vdev->fw_info)
			pci_free_consistent(vdev->pdev,
				sizeof(struct vnic_devcmd_fw_info),
				vdev->fw_info, vdev->fw_info_pa);
		kfree(vdev);
	}
}

struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
	void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
	if (!vdev) {
		vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
		if (!vdev)
			return NULL;
	}

	vdev->priv = priv;
	vdev->pdev = pdev;

	if (vnic_dev_discover_res(vdev, bar))
		goto err_out;

	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		goto err_out;

	return vdev;

err_out:
	vnic_dev_unregister(vdev);
	return NULL;
}
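
/*
 * Illustrative sketch of the expected probe-time lifecycle (assumed from
 * the interfaces above, not code in this file): the PCI driver maps BAR0
 * into a struct vnic_dev_bar, registers the vnic_dev, opens the device,
 * and tears everything down in reverse order on failure or removal.
 * "bar" and "priv" stand for the caller's own mapped BAR and private
 * context.
 *
 *	struct vnic_dev *vdev;
 *
 *	vdev = vnic_dev_register(NULL, priv, pdev, &bar);
 *	if (!vdev)
 *		return -ENODEV;
 *
 *	if (vnic_dev_open(vdev, 0)) {
 *		vnic_dev_unregister(vdev);
 *		return -ENODEV;
 *	}
 *
 *	... normal operation ...
 *
 *	vnic_dev_close(vdev);
 *	vnic_dev_unregister(vdev);
 */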