/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

static const char mlxsw_pci_driver_name[] = "mlxsw_pci";

static struct dentry *mlxsw_pci_dbg_root;

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
{
	switch (q_type) {
	case MLXSW_PCI_QUEUE_TYPE_SDQ:
		return "sdq";
	case MLXSW_PCI_QUEUE_TYPE_RDQ:
		return "rdq";
	case MLXSW_PCI_QUEUE_TYPE_CQ:
		return "cq";
	case MLXSW_PCI_QUEUE_TYPE_EQ:
		return "eq";
	}
	BUG();
}

#define MLXSW_PCI_QUEUE_TYPE_COUNT 4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct msix_entry msix_entry;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	struct dentry *dbg_dir;
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}
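
/* Ownership of a ring element alternates between hardware and software.
 * The ring size is a power of two, so the bit of the consumer counter at
 * position log2(count) flips on every wrap-around; comparing it with the
 * element's owner bit tells us whether the element is still owned by the
 * hardware.
 */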
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static char *
mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
			    u32 (*get_elem_owner_func)(const char *))
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = get_elem_owner_func(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
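
	/* The SW2HW_DQ mailbox carries the queue description: the
	 * scheduling traffic class, the log2 of the queue size in pages and
	 * the physical address of every page backing the ring.
	 */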
	/* Set the CQ with the same number as this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_sdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;
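
	/* The receive buffer is sized for the largest possible frame
	 * (MLXSW_PORT_MAX_MTU) and is mapped as a single fragment in
	 * scatter/gather entry 0 of the WQE.
	 */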
	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Bind this RDQ to the CQ whose number is offset by the SDQ count,
	 * since the lower-numbered CQs are assigned to the SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM PROD_COUNT CONS_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_rdq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %5d\n",
			   i, q->producer_counter, q->consumer_counter,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
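	/* Completions for this CQ are reported through the completion EQ.
	 * Once the SW2HW command below succeeds, the consumer doorbell is
	 * rung and the CQ is armed so that the first completion raises an
	 * event.
	 */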
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_cq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.cq.comp_sdq_count,
			   q->u.cq.comp_rdq_count, q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
		rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
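	/* The element is handed back to the device even when the refill
	 * allocation fails; its skb pointer stays NULL and the check at the
	 * top of this function skips such elements on the next completion.
	 */
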
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
}

static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, cqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, cqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
	struct mlxsw_pci_queue *q;
	int i;
	static const char hdr[] =
		"NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";

	seq_printf(file, hdr);
	for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		spin_lock_bh(&q->lock);
		seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
			   i, q->consumer_counter, q->u.eq.ev_cmd_count,
			   q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
			   q->count);
		spin_unlock_bh(&q->lock);
	}
	return 0;
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}
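
/* All CQs are bound to the EQ numbered MLXSW_PCI_EQ_COMP_NUM, so completion
 * events are expected there, while command interface events may arrive on
 * the other EQ. A single MSI-X vector serves all MLXSW_PCI_EQS_COUNT event
 * queues, which is why the interrupt handler schedules every EQ tasklet and
 * each tasklet handles both event types.
 */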
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
}

static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
		u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);

		switch (event_type) {
		case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	int (*dbg_read)(struct seq_file *s, void *data);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init = mlxsw_pci_sdq_init,
	.fini = mlxsw_pci_sdq_fini,
	.dbg_read = mlxsw_pci_sdq_dbg_read,
	.elem_count = MLXSW_PCI_WQE_COUNT,
	.elem_size = MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init = mlxsw_pci_rdq_init,
	.fini = mlxsw_pci_rdq_fini,
	.dbg_read = mlxsw_pci_rdq_dbg_read,
	.elem_count = MLXSW_PCI_WQE_COUNT,
	.elem_size = MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_CQ,
	.init = mlxsw_pci_cq_init,
	.fini = mlxsw_pci_cq_fini,
	.tasklet = mlxsw_pci_cq_tasklet,
	.dbg_read = mlxsw_pci_cq_dbg_read,
	.elem_count = MLXSW_PCI_CQE_COUNT,
	.elem_size = MLXSW_PCI_CQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type = MLXSW_PCI_QUEUE_TYPE_EQ,
	.init = mlxsw_pci_eq_init,
	.fini = mlxsw_pci_eq_fini,
	.tasklet = mlxsw_pci_eq_tasklet,
	.dbg_read = mlxsw_pci_eq_dbg_read,
	.elem_count = MLXSW_PCI_EQE_COUNT,
	.elem_size = MLXSW_PCI_EQE_SIZE
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	spin_lock_init(&q->lock);
	q->num = q_num;
	q->count = q_ops->elem_count;
	q->elem_size = q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;
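
	/* Only queue types that are serviced in deferred context (CQs and
	 * EQs) provide a tasklet; descriptor queues are handled from the CQ
	 * completion path instead.
	 */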
	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;
	memset(mem_item->buf, 0, mem_item->size);

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize the per-element info array so each DMA-mapped element
	 * can be reached directly later.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_type_group *queue_group;
	char tmp[16];
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
	debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
				    q_ops->dbg_read);

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;
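
	/* Validate the device's async queue capabilities against the fixed
	 * geometry this driver is built for; anything else is rejected
	 * rather than negotiated down.
	 */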
	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* Until now we had to use polling in the command interface; with the
	 * event queues initialized, command completions can be delivered as
	 * events.
	 */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
				     struct mlxsw_res *res,
				     u8 query_enabled)
{
	int index, i;
	u64 data;
	u16 id;
	int err;

	/* Not all firmware versions support the resources query. */
	if (!query_enabled)
		return 0;

	mlxsw_cmd_mbox_zero(mbox);
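
	/* Resources are read out in pages: each QUERY_RESOURCES call returns
	 * up to MLXSW_CMD_QUERY_RESOURCES_PER_QUERY id/value pairs, and the
	 * table is terminated by MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID.
	 */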
	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
	     index++) {
		err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
		if (err)
			return err;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return 0;

			mlxsw_res_parse(res, id, data);
		}
	}

	/* If we still have not seen MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID
	 * after MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES queries, something
	 * went bad in the FW.
	 */
	return -EIO;
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u32 single_size, double_size, linear_size;

	if (!MLXSW_RES_VALID(res, KVD_SINGLE_MIN_SIZE) ||
	    !MLXSW_RES_VALID(res, KVD_DOUBLE_MIN_SIZE) ||
	    !profile->used_kvd_split_data)
		return -EIO;

	linear_size = profile->kvd_linear_size;

	/* The hash part is what is left of the KVD after the linear part is
	 * taken out. It is split into single and double sizes according to
	 * the parts ratio from the profile, and both sizes must be multiples
	 * of the granularity from the profile.
	 */
	double_size = MLXSW_RES_GET(res, KVD_SIZE) - linear_size;
	double_size *= profile->kvd_hash_double_parts;
	double_size /= profile->kvd_hash_double_parts +
		       profile->kvd_hash_single_parts;
	double_size /= profile->kvd_hash_granularity;
	double_size *= profile->kvd_hash_granularity;
	single_size = MLXSW_RES_GET(res, KVD_SIZE) - double_size -
		      linear_size;

	/* Check that the results are legal. */
	if (single_size < MLXSW_RES_GET(res, KVD_SINGLE_MIN_SIZE) ||
	    double_size < MLXSW_RES_GET(res, KVD_DOUBLE_MIN_SIZE) ||
	    MLXSW_RES_GET(res, KVD_SIZE) < linear_size)
		return -EIO;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
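
	/* Firmware memory is donated page by page: the physical addresses
	 * are collected in the MAP_FA mailbox and flushed to the device
	 * whenever MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries have accumulated.
	 */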
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
					 &mbox->mapaddr);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			    mbox->mapaddr);
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto mbox_put;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;
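
	/* QUERY_FW reports the firmware revision, the supported command
	 * interface revision, which BAR holds the doorbell page (and at what
	 * offset) and how many memory pages should be mapped for the
	 * firmware's own use.
	 */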
	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res,
					profile->resource_query_enable);
	if (err)
		goto err_query_resources;

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(mlxsw_pci->msix_entry.vector,
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;
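
	/* A WQE can carry at most MLXSW_PCI_WQE_SG_ENTRIES fragments, and
	 * entry 0 is reserved for the linear part of the skb; anything with
	 * more page fragments than that must be linearized first.
	 */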
	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	dma_addr_t out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox)
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;
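
	/* Writing the GO bit hands the command over to the firmware. Before
	 * the event queues are up the driver busy-polls the GO bit for
	 * completion; once cmd.nopoll is set, it requests an event instead
	 * and sleeps on the command wait queue.
	 */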
	wmb(); /* everything must be written before we write the control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands do not use the output param as an address to
		 * a mailbox but instead store the output directly in
		 * registers. In that case, copy the registers into the mbox
		 * buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind = "pci",
	.init = mlxsw_pci_init,
	.fini = mlxsw_pci_fini,
	.skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
	.skb_transmit = mlxsw_pci_skb_transmit,
	.cmd_exec = mlxsw_pci_cmd_exec,
	.features = MLXSW_BUS_F_TXRX,
};

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	unsigned long end;

	mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	wmb(); /* reset needs to be written before we read control register */
	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			break;
		cond_resched();
	} while (time_before(jiffies, end));
	return 0;
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}
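
	/* BAR 0 holds the command interface registers and the doorbell
	 * pages; make sure it is at least as large as the layout we are
	 * about to map.
	 */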
	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_sw_reset(mlxsw_pci, id);
	if (err) {
		dev_err(&pdev->dev, "Software reset failed\n");
		goto err_sw_reset;
	}

	err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;

	mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
						mlxsw_pci_dbg_root);
	if (!mlxsw_pci->dbg_dir) {
		dev_err(&pdev->dev, "Failed to create debugfs dir\n");
		err = -ENOMEM;
		goto err_dbg_create_dir;
	}

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
err_dbg_create_dir:
	pci_disable_msix(mlxsw_pci->pdev);
err_msix_init:
err_sw_reset:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core);
	debugfs_remove_recursive(mlxsw_pci->dbg_dir);
	pci_disable_msix(mlxsw_pci->pdev);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
	if (!mlxsw_pci_dbg_root)
		return -ENOMEM;
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_pci_dbg_root);
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");