1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Mellanox BlueField SoC TmFifo driver 4 * 5 * Copyright (C) 2019 Mellanox Technologies 6 */ 7 8 #include <linux/acpi.h> 9 #include <linux/bitfield.h> 10 #include <linux/circ_buf.h> 11 #include <linux/efi.h> 12 #include <linux/irq.h> 13 #include <linux/module.h> 14 #include <linux/mutex.h> 15 #include <linux/platform_device.h> 16 #include <linux/types.h> 17 18 #include <linux/virtio_config.h> 19 #include <linux/virtio_console.h> 20 #include <linux/virtio_ids.h> 21 #include <linux/virtio_net.h> 22 #include <linux/virtio_ring.h> 23 24 #include "mlxbf-tmfifo-regs.h" 25 26 /* Vring size. */ 27 #define MLXBF_TMFIFO_VRING_SIZE SZ_1K 28 29 /* Console Tx buffer size. */ 30 #define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K 31 32 /* Console Tx buffer reserved space. */ 33 #define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8 34 35 /* House-keeping timer interval. */ 36 #define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10) 37 38 /* Virtual devices sharing the TM FIFO. */ 39 #define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1) 40 41 /* 42 * Reserve 1/16 of TmFifo space, so console messages are not starved by 43 * the networking traffic. 44 */ 45 #define MLXBF_TMFIFO_RESERVE_RATIO 16 46 47 /* Message with data needs at least two words (for header & data). 
*/ 48 #define MLXBF_TMFIFO_DATA_MIN_WORDS 2 49 50 struct mlxbf_tmfifo; 51 52 /** 53 * mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring 54 * @va: virtual address of the ring 55 * @dma: dma address of the ring 56 * @vq: pointer to the virtio virtqueue 57 * @desc: current descriptor of the pending packet 58 * @desc_head: head descriptor of the pending packet 59 * @cur_len: processed length of the current descriptor 60 * @rem_len: remaining length of the pending packet 61 * @pkt_len: total length of the pending packet 62 * @next_avail: next avail descriptor id 63 * @num: vring size (number of descriptors) 64 * @align: vring alignment size 65 * @index: vring index 66 * @vdev_id: vring virtio id (VIRTIO_ID_xxx) 67 * @fifo: pointer to the tmfifo structure 68 */ 69 struct mlxbf_tmfifo_vring { 70 void *va; 71 dma_addr_t dma; 72 struct virtqueue *vq; 73 struct vring_desc *desc; 74 struct vring_desc *desc_head; 75 int cur_len; 76 int rem_len; 77 u32 pkt_len; 78 u16 next_avail; 79 int num; 80 int align; 81 int index; 82 int vdev_id; 83 struct mlxbf_tmfifo *fifo; 84 }; 85 86 /* Interrupt types. */ 87 enum { 88 MLXBF_TM_RX_LWM_IRQ, 89 MLXBF_TM_RX_HWM_IRQ, 90 MLXBF_TM_TX_LWM_IRQ, 91 MLXBF_TM_TX_HWM_IRQ, 92 MLXBF_TM_MAX_IRQ 93 }; 94 95 /* Ring types (Rx & Tx). */ 96 enum { 97 MLXBF_TMFIFO_VRING_RX, 98 MLXBF_TMFIFO_VRING_TX, 99 MLXBF_TMFIFO_VRING_MAX 100 }; 101 102 /** 103 * mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device 104 * @vdev: virtio device, in which the vdev.id.device field has the 105 * VIRTIO_ID_xxx id to distinguish the virtual device. 
106 * @status: status of the device 107 * @features: supported features of the device 108 * @vrings: array of tmfifo vrings of this device 109 * @config.cons: virtual console config - 110 * select if vdev.id.device is VIRTIO_ID_CONSOLE 111 * @config.net: virtual network config - 112 * select if vdev.id.device is VIRTIO_ID_NET 113 * @tx_buf: tx buffer used to buffer data before writing into the FIFO 114 */ 115 struct mlxbf_tmfifo_vdev { 116 struct virtio_device vdev; 117 u8 status; 118 u64 features; 119 struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX]; 120 union { 121 struct virtio_console_config cons; 122 struct virtio_net_config net; 123 } config; 124 struct circ_buf tx_buf; 125 }; 126 127 /** 128 * mlxbf_tmfifo_irq_info - Structure of the interrupt information 129 * @fifo: pointer to the tmfifo structure 130 * @irq: interrupt number 131 * @index: index into the interrupt array 132 */ 133 struct mlxbf_tmfifo_irq_info { 134 struct mlxbf_tmfifo *fifo; 135 int irq; 136 int index; 137 }; 138 139 /** 140 * mlxbf_tmfifo - Structure of the TmFifo 141 * @vdev: array of the virtual devices running over the TmFifo 142 * @lock: lock to protect the TmFifo access 143 * @rx_base: mapped register base address for the Rx FIFO 144 * @tx_base: mapped register base address for the Tx FIFO 145 * @rx_fifo_size: number of entries of the Rx FIFO 146 * @tx_fifo_size: number of entries of the Tx FIFO 147 * @pend_events: pending bits for deferred events 148 * @irq_info: interrupt information 149 * @work: work struct for deferred process 150 * @timer: background timer 151 * @vring: Tx/Rx ring 152 * @spin_lock: Tx/Rx spin lock 153 * @is_ready: ready flag 154 */ 155 struct mlxbf_tmfifo { 156 struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX]; 157 struct mutex lock; /* TmFifo lock */ 158 void __iomem *rx_base; 159 void __iomem *tx_base; 160 int rx_fifo_size; 161 int tx_fifo_size; 162 unsigned long pend_events; 163 struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ]; 164 struct 
work_struct work; 165 struct timer_list timer; 166 struct mlxbf_tmfifo_vring *vring[2]; 167 spinlock_t spin_lock[2]; /* spin lock */ 168 bool is_ready; 169 }; 170 171 /** 172 * mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header 173 * @type: message type 174 * @len: payload length in network byte order. Messages sent into the FIFO 175 * will be read by the other side as data stream in the same byte order. 176 * The length needs to be encoded into network order so both sides 177 * could understand it. 178 */ 179 struct mlxbf_tmfifo_msg_hdr { 180 u8 type; 181 __be16 len; 182 u8 unused[5]; 183 } __packed __aligned(sizeof(u64)); 184 185 /* 186 * Default MAC. 187 * This MAC address will be read from EFI persistent variable if configured. 188 * It can also be reconfigured with standard Linux tools. 189 */ 190 static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = { 191 0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01 192 }; 193 194 /* EFI variable name of the MAC address. */ 195 static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr"; 196 197 /* Maximum L2 header length. */ 198 #define MLXBF_TMFIFO_NET_L2_OVERHEAD 36 199 200 /* Supported virtio-net features. */ 201 #define MLXBF_TMFIFO_NET_FEATURES \ 202 (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \ 203 BIT_ULL(VIRTIO_NET_F_MAC)) 204 205 #define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev) 206 207 /* Free vrings of the FIFO device. 
*/ 208 static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo, 209 struct mlxbf_tmfifo_vdev *tm_vdev) 210 { 211 struct mlxbf_tmfifo_vring *vring; 212 int i, size; 213 214 for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) { 215 vring = &tm_vdev->vrings[i]; 216 if (vring->va) { 217 size = vring_size(vring->num, vring->align); 218 dma_free_coherent(tm_vdev->vdev.dev.parent, size, 219 vring->va, vring->dma); 220 vring->va = NULL; 221 if (vring->vq) { 222 vring_del_virtqueue(vring->vq); 223 vring->vq = NULL; 224 } 225 } 226 } 227 } 228 229 /* Allocate vrings for the FIFO. */ 230 static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo, 231 struct mlxbf_tmfifo_vdev *tm_vdev) 232 { 233 struct mlxbf_tmfifo_vring *vring; 234 struct device *dev; 235 dma_addr_t dma; 236 int i, size; 237 void *va; 238 239 for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) { 240 vring = &tm_vdev->vrings[i]; 241 vring->fifo = fifo; 242 vring->num = MLXBF_TMFIFO_VRING_SIZE; 243 vring->align = SMP_CACHE_BYTES; 244 vring->index = i; 245 vring->vdev_id = tm_vdev->vdev.id.device; 246 dev = &tm_vdev->vdev.dev; 247 248 size = vring_size(vring->num, vring->align); 249 va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL); 250 if (!va) { 251 mlxbf_tmfifo_free_vrings(fifo, tm_vdev); 252 dev_err(dev->parent, "dma_alloc_coherent failed\n"); 253 return -ENOMEM; 254 } 255 256 vring->va = va; 257 vring->dma = dma; 258 } 259 260 return 0; 261 } 262 263 /* Disable interrupts of the FIFO device. */ 264 static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo) 265 { 266 int i, irq; 267 268 for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) { 269 irq = fifo->irq_info[i].irq; 270 fifo->irq_info[i].irq = 0; 271 disable_irq(irq); 272 } 273 } 274 275 /* Interrupt handler. 
*/ 276 static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg) 277 { 278 struct mlxbf_tmfifo_irq_info *irq_info = arg; 279 280 if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events)) 281 schedule_work(&irq_info->fifo->work); 282 283 return IRQ_HANDLED; 284 } 285 286 /* Get the next packet descriptor from the vring. */ 287 static struct vring_desc * 288 mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring) 289 { 290 const struct vring *vr = virtqueue_get_vring(vring->vq); 291 struct virtio_device *vdev = vring->vq->vdev; 292 unsigned int idx, head; 293 294 if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx)) 295 return NULL; 296 297 idx = vring->next_avail % vr->num; 298 head = virtio16_to_cpu(vdev, vr->avail->ring[idx]); 299 if (WARN_ON(head >= vr->num)) 300 return NULL; 301 302 vring->next_avail++; 303 304 return &vr->desc[head]; 305 } 306 307 /* Release virtio descriptor. */ 308 static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring, 309 struct vring_desc *desc, u32 len) 310 { 311 const struct vring *vr = virtqueue_get_vring(vring->vq); 312 struct virtio_device *vdev = vring->vq->vdev; 313 u16 idx, vr_idx; 314 315 vr_idx = virtio16_to_cpu(vdev, vr->used->idx); 316 idx = vr_idx % vr->num; 317 vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc); 318 vr->used->ring[idx].len = cpu_to_virtio32(vdev, len); 319 320 /* 321 * Virtio could poll and check the 'idx' to decide whether the desc is 322 * done or not. Add a memory barrier here to make sure the update above 323 * completes before updating the idx. 324 */ 325 mb(); 326 vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1); 327 } 328 329 /* Get the total length of the descriptor chain. 
*/ 330 static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring, 331 struct vring_desc *desc) 332 { 333 const struct vring *vr = virtqueue_get_vring(vring->vq); 334 struct virtio_device *vdev = vring->vq->vdev; 335 u32 len = 0, idx; 336 337 while (desc) { 338 len += virtio32_to_cpu(vdev, desc->len); 339 if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) 340 break; 341 idx = virtio16_to_cpu(vdev, desc->next); 342 desc = &vr->desc[idx]; 343 } 344 345 return len; 346 } 347 348 static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring) 349 { 350 struct vring_desc *desc_head; 351 u32 len = 0; 352 353 if (vring->desc_head) { 354 desc_head = vring->desc_head; 355 len = vring->pkt_len; 356 } else { 357 desc_head = mlxbf_tmfifo_get_next_desc(vring); 358 len = mlxbf_tmfifo_get_pkt_len(vring, desc_head); 359 } 360 361 if (desc_head) 362 mlxbf_tmfifo_release_desc(vring, desc_head, len); 363 364 vring->pkt_len = 0; 365 vring->desc = NULL; 366 vring->desc_head = NULL; 367 } 368 369 static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring, 370 struct vring_desc *desc, bool is_rx) 371 { 372 struct virtio_device *vdev = vring->vq->vdev; 373 struct virtio_net_hdr *net_hdr; 374 375 net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr)); 376 memset(net_hdr, 0, sizeof(*net_hdr)); 377 } 378 379 /* Get and initialize the next packet. */ 380 static struct vring_desc * 381 mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx) 382 { 383 struct vring_desc *desc; 384 385 desc = mlxbf_tmfifo_get_next_desc(vring); 386 if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET) 387 mlxbf_tmfifo_init_net_desc(vring, desc, is_rx); 388 389 vring->desc_head = desc; 390 vring->desc = desc; 391 392 return desc; 393 } 394 395 /* House-keeping timer. 
*/ 396 static void mlxbf_tmfifo_timer(struct timer_list *t) 397 { 398 struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer); 399 int rx, tx; 400 401 rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events); 402 tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events); 403 404 if (rx || tx) 405 schedule_work(&fifo->work); 406 407 mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL); 408 } 409 410 /* Copy one console packet into the output buffer. */ 411 static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons, 412 struct mlxbf_tmfifo_vring *vring, 413 struct vring_desc *desc) 414 { 415 const struct vring *vr = virtqueue_get_vring(vring->vq); 416 struct virtio_device *vdev = &cons->vdev; 417 u32 len, idx, seg; 418 void *addr; 419 420 while (desc) { 421 addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr)); 422 len = virtio32_to_cpu(vdev, desc->len); 423 424 seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail, 425 MLXBF_TMFIFO_CON_TX_BUF_SIZE); 426 if (len <= seg) { 427 memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len); 428 } else { 429 memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg); 430 addr += seg; 431 memcpy(cons->tx_buf.buf, addr, len - seg); 432 } 433 cons->tx_buf.head = (cons->tx_buf.head + len) % 434 MLXBF_TMFIFO_CON_TX_BUF_SIZE; 435 436 if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) 437 break; 438 idx = virtio16_to_cpu(vdev, desc->next); 439 desc = &vr->desc[idx]; 440 } 441 } 442 443 /* Copy console data into the output buffer. */ 444 static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons, 445 struct mlxbf_tmfifo_vring *vring) 446 { 447 struct vring_desc *desc; 448 u32 len, avail; 449 450 desc = mlxbf_tmfifo_get_next_desc(vring); 451 while (desc) { 452 /* Release the packet if not enough space. 
*/ 453 len = mlxbf_tmfifo_get_pkt_len(vring, desc); 454 avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail, 455 MLXBF_TMFIFO_CON_TX_BUF_SIZE); 456 if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) { 457 mlxbf_tmfifo_release_desc(vring, desc, len); 458 break; 459 } 460 461 mlxbf_tmfifo_console_output_one(cons, vring, desc); 462 mlxbf_tmfifo_release_desc(vring, desc, len); 463 desc = mlxbf_tmfifo_get_next_desc(vring); 464 } 465 } 466 467 /* Get the number of available words in Rx FIFO for receiving. */ 468 static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo) 469 { 470 u64 sts; 471 472 sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS); 473 return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts); 474 } 475 476 /* Get the number of available words in the TmFifo for sending. */ 477 static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id) 478 { 479 int tx_reserve; 480 u32 count; 481 u64 sts; 482 483 /* Reserve some room in FIFO for console messages. */ 484 if (vdev_id == VIRTIO_ID_NET) 485 tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO; 486 else 487 tx_reserve = 1; 488 489 sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS); 490 count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts); 491 return fifo->tx_fifo_size - tx_reserve - count; 492 } 493 494 /* Console Tx (move data from the output buffer into the TmFifo). */ 495 static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail) 496 { 497 struct mlxbf_tmfifo_msg_hdr hdr; 498 struct mlxbf_tmfifo_vdev *cons; 499 unsigned long flags; 500 int size, seg; 501 void *addr; 502 u64 data; 503 504 /* Return if not enough space available. */ 505 if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS) 506 return; 507 508 cons = fifo->vdev[VIRTIO_ID_CONSOLE]; 509 if (!cons || !cons->tx_buf.buf) 510 return; 511 512 /* Return if no data to send. 
*/ 513 size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail, 514 MLXBF_TMFIFO_CON_TX_BUF_SIZE); 515 if (size == 0) 516 return; 517 518 /* Adjust the size to available space. */ 519 if (size + sizeof(hdr) > avail * sizeof(u64)) 520 size = avail * sizeof(u64) - sizeof(hdr); 521 522 /* Write header. */ 523 hdr.type = VIRTIO_ID_CONSOLE; 524 hdr.len = htons(size); 525 writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 526 527 /* Use spin-lock to protect the 'cons->tx_buf'. */ 528 spin_lock_irqsave(&fifo->spin_lock[0], flags); 529 530 while (size > 0) { 531 addr = cons->tx_buf.buf + cons->tx_buf.tail; 532 533 seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail, 534 MLXBF_TMFIFO_CON_TX_BUF_SIZE); 535 if (seg >= sizeof(u64)) { 536 memcpy(&data, addr, sizeof(u64)); 537 } else { 538 memcpy(&data, addr, seg); 539 memcpy((u8 *)&data + seg, cons->tx_buf.buf, 540 sizeof(u64) - seg); 541 } 542 writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 543 544 if (size >= sizeof(u64)) { 545 cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) % 546 MLXBF_TMFIFO_CON_TX_BUF_SIZE; 547 size -= sizeof(u64); 548 } else { 549 cons->tx_buf.tail = (cons->tx_buf.tail + size) % 550 MLXBF_TMFIFO_CON_TX_BUF_SIZE; 551 size = 0; 552 } 553 } 554 555 spin_unlock_irqrestore(&fifo->spin_lock[0], flags); 556 } 557 558 /* Rx/Tx one word in the descriptor buffer. */ 559 static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring, 560 struct vring_desc *desc, 561 bool is_rx, int len) 562 { 563 struct virtio_device *vdev = vring->vq->vdev; 564 struct mlxbf_tmfifo *fifo = vring->fifo; 565 void *addr; 566 u64 data; 567 568 /* Get the buffer address of this desc. */ 569 addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr)); 570 571 /* Read a word from FIFO for Rx. */ 572 if (is_rx) 573 data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA); 574 575 if (vring->cur_len + sizeof(u64) <= len) { 576 /* The whole word. 
*/ 577 if (is_rx) 578 memcpy(addr + vring->cur_len, &data, sizeof(u64)); 579 else 580 memcpy(&data, addr + vring->cur_len, sizeof(u64)); 581 vring->cur_len += sizeof(u64); 582 } else { 583 /* Leftover bytes. */ 584 if (is_rx) 585 memcpy(addr + vring->cur_len, &data, 586 len - vring->cur_len); 587 else 588 memcpy(&data, addr + vring->cur_len, 589 len - vring->cur_len); 590 vring->cur_len = len; 591 } 592 593 /* Write the word into FIFO for Tx. */ 594 if (!is_rx) 595 writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 596 } 597 598 /* 599 * Rx/Tx packet header. 600 * 601 * In Rx case, the packet might be found to belong to a different vring since 602 * the TmFifo is shared by different services. In such case, the 'vring_change' 603 * flag is set. 604 */ 605 static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, 606 struct vring_desc *desc, 607 bool is_rx, bool *vring_change) 608 { 609 struct mlxbf_tmfifo *fifo = vring->fifo; 610 struct virtio_net_config *config; 611 struct mlxbf_tmfifo_msg_hdr hdr; 612 int vdev_id, hdr_len; 613 614 /* Read/Write packet header. */ 615 if (is_rx) { 616 /* Drain one word from the FIFO. */ 617 *(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA); 618 619 /* Skip the length 0 packets (keepalive). */ 620 if (hdr.len == 0) 621 return; 622 623 /* Check packet type. */ 624 if (hdr.type == VIRTIO_ID_NET) { 625 vdev_id = VIRTIO_ID_NET; 626 hdr_len = sizeof(struct virtio_net_hdr); 627 config = &fifo->vdev[vdev_id]->config.net; 628 /* A legacy-only interface for now. */ 629 if (ntohs(hdr.len) > 630 __virtio16_to_cpu(virtio_legacy_is_little_endian(), 631 config->mtu) + 632 MLXBF_TMFIFO_NET_L2_OVERHEAD) 633 return; 634 } else { 635 vdev_id = VIRTIO_ID_CONSOLE; 636 hdr_len = 0; 637 } 638 639 /* 640 * Check whether the new packet still belongs to this vring. 641 * If not, update the pkt_len of the new vring. 
642 */ 643 if (vdev_id != vring->vdev_id) { 644 struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id]; 645 646 if (!tm_dev2) 647 return; 648 vring->desc = desc; 649 vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX]; 650 *vring_change = true; 651 } 652 vring->pkt_len = ntohs(hdr.len) + hdr_len; 653 } else { 654 /* Network virtio has an extra header. */ 655 hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ? 656 sizeof(struct virtio_net_hdr) : 0; 657 vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc); 658 hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ? 659 VIRTIO_ID_NET : VIRTIO_ID_CONSOLE; 660 hdr.len = htons(vring->pkt_len - hdr_len); 661 writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA); 662 } 663 664 vring->cur_len = hdr_len; 665 vring->rem_len = vring->pkt_len; 666 fifo->vring[is_rx] = vring; 667 } 668 669 /* 670 * Rx/Tx one descriptor. 671 * 672 * Return true to indicate more data available. 673 */ 674 static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring, 675 bool is_rx, int *avail) 676 { 677 const struct vring *vr = virtqueue_get_vring(vring->vq); 678 struct mlxbf_tmfifo *fifo = vring->fifo; 679 struct virtio_device *vdev; 680 bool vring_change = false; 681 struct vring_desc *desc; 682 unsigned long flags; 683 u32 len, idx; 684 685 vdev = &fifo->vdev[vring->vdev_id]->vdev; 686 687 /* Get the descriptor of the next packet. */ 688 if (!vring->desc) { 689 desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx); 690 if (!desc) 691 return false; 692 } else { 693 desc = vring->desc; 694 } 695 696 /* Beginning of a packet. Start to Rx/Tx packet header. */ 697 if (vring->pkt_len == 0) { 698 mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change); 699 (*avail)--; 700 701 /* Return if new packet is for another ring. */ 702 if (vring_change) 703 return false; 704 goto mlxbf_tmfifo_desc_done; 705 } 706 707 /* Get the length of this desc. 
*/ 708 len = virtio32_to_cpu(vdev, desc->len); 709 if (len > vring->rem_len) 710 len = vring->rem_len; 711 712 /* Rx/Tx one word (8 bytes) if not done. */ 713 if (vring->cur_len < len) { 714 mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len); 715 (*avail)--; 716 } 717 718 /* Check again whether it's done. */ 719 if (vring->cur_len == len) { 720 vring->cur_len = 0; 721 vring->rem_len -= len; 722 723 /* Get the next desc on the chain. */ 724 if (vring->rem_len > 0 && 725 (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) { 726 idx = virtio16_to_cpu(vdev, desc->next); 727 desc = &vr->desc[idx]; 728 goto mlxbf_tmfifo_desc_done; 729 } 730 731 /* Done and release the pending packet. */ 732 mlxbf_tmfifo_release_pending_pkt(vring); 733 desc = NULL; 734 fifo->vring[is_rx] = NULL; 735 736 /* Notify upper layer that packet is done. */ 737 spin_lock_irqsave(&fifo->spin_lock[is_rx], flags); 738 vring_interrupt(0, vring->vq); 739 spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags); 740 } 741 742 mlxbf_tmfifo_desc_done: 743 /* Save the current desc. */ 744 vring->desc = desc; 745 746 return true; 747 } 748 749 /* Rx & Tx processing of a queue. */ 750 static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx) 751 { 752 int avail = 0, devid = vring->vdev_id; 753 struct mlxbf_tmfifo *fifo; 754 bool more; 755 756 fifo = vring->fifo; 757 758 /* Return if vdev is not ready. */ 759 if (!fifo->vdev[devid]) 760 return; 761 762 /* Return if another vring is running. */ 763 if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring) 764 return; 765 766 /* Only handle console and network for now. */ 767 if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE)) 768 return; 769 770 do { 771 /* Get available FIFO space. */ 772 if (avail == 0) { 773 if (is_rx) 774 avail = mlxbf_tmfifo_get_rx_avail(fifo); 775 else 776 avail = mlxbf_tmfifo_get_tx_avail(fifo, devid); 777 if (avail <= 0) 778 break; 779 } 780 781 /* Console output always comes from the Tx buffer. 
*/ 782 if (!is_rx && devid == VIRTIO_ID_CONSOLE) { 783 mlxbf_tmfifo_console_tx(fifo, avail); 784 break; 785 } 786 787 /* Handle one descriptor. */ 788 more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail); 789 } while (more); 790 } 791 792 /* Handle Rx or Tx queues. */ 793 static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id, 794 int irq_id, bool is_rx) 795 { 796 struct mlxbf_tmfifo_vdev *tm_vdev; 797 struct mlxbf_tmfifo_vring *vring; 798 int i; 799 800 if (!test_and_clear_bit(irq_id, &fifo->pend_events) || 801 !fifo->irq_info[irq_id].irq) 802 return; 803 804 for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) { 805 tm_vdev = fifo->vdev[i]; 806 if (tm_vdev) { 807 vring = &tm_vdev->vrings[queue_id]; 808 if (vring->vq) 809 mlxbf_tmfifo_rxtx(vring, is_rx); 810 } 811 } 812 } 813 814 /* Work handler for Rx and Tx case. */ 815 static void mlxbf_tmfifo_work_handler(struct work_struct *work) 816 { 817 struct mlxbf_tmfifo *fifo; 818 819 fifo = container_of(work, struct mlxbf_tmfifo, work); 820 if (!fifo->is_ready) 821 return; 822 823 mutex_lock(&fifo->lock); 824 825 /* Tx (Send data to the TmFifo). */ 826 mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX, 827 MLXBF_TM_TX_LWM_IRQ, false); 828 829 /* Rx (Receive data from the TmFifo). */ 830 mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX, 831 MLXBF_TM_RX_HWM_IRQ, true); 832 833 mutex_unlock(&fifo->lock); 834 } 835 836 /* The notify function is called when new buffers are posted. */ 837 static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq) 838 { 839 struct mlxbf_tmfifo_vring *vring = vq->priv; 840 struct mlxbf_tmfifo_vdev *tm_vdev; 841 struct mlxbf_tmfifo *fifo; 842 unsigned long flags; 843 844 fifo = vring->fifo; 845 846 /* 847 * Virtio maintains vrings in pairs, even number ring for Rx 848 * and odd number ring for Tx. 849 */ 850 if (vring->index & BIT(0)) { 851 /* 852 * Console could make blocking call with interrupts disabled. 853 * In such case, the vring needs to be served right away. 
For 854 * other cases, just set the TX LWM bit to start Tx in the 855 * worker handler. 856 */ 857 if (vring->vdev_id == VIRTIO_ID_CONSOLE) { 858 spin_lock_irqsave(&fifo->spin_lock[0], flags); 859 tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE]; 860 mlxbf_tmfifo_console_output(tm_vdev, vring); 861 spin_unlock_irqrestore(&fifo->spin_lock[0], flags); 862 } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, 863 &fifo->pend_events)) { 864 return true; 865 } 866 } else { 867 if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events)) 868 return true; 869 } 870 871 schedule_work(&fifo->work); 872 873 return true; 874 } 875 876 /* Get the array of feature bits for this device. */ 877 static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev) 878 { 879 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 880 881 return tm_vdev->features; 882 } 883 884 /* Confirm device features to use. */ 885 static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev) 886 { 887 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 888 889 tm_vdev->features = vdev->features; 890 891 return 0; 892 } 893 894 /* Free virtqueues found by find_vqs(). */ 895 static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev) 896 { 897 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 898 struct mlxbf_tmfifo_vring *vring; 899 struct virtqueue *vq; 900 int i; 901 902 for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) { 903 vring = &tm_vdev->vrings[i]; 904 905 /* Release the pending packet. */ 906 if (vring->desc) 907 mlxbf_tmfifo_release_pending_pkt(vring); 908 vq = vring->vq; 909 if (vq) { 910 vring->vq = NULL; 911 vring_del_virtqueue(vq); 912 } 913 } 914 } 915 916 /* Create and initialize the virtual queues. 
*/ 917 static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev, 918 unsigned int nvqs, 919 struct virtqueue *vqs[], 920 vq_callback_t *callbacks[], 921 const char * const names[], 922 const bool *ctx, 923 struct irq_affinity *desc) 924 { 925 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 926 struct mlxbf_tmfifo_vring *vring; 927 struct virtqueue *vq; 928 int i, ret, size; 929 930 if (nvqs > ARRAY_SIZE(tm_vdev->vrings)) 931 return -EINVAL; 932 933 for (i = 0; i < nvqs; ++i) { 934 if (!names[i]) { 935 ret = -EINVAL; 936 goto error; 937 } 938 vring = &tm_vdev->vrings[i]; 939 940 /* zero vring */ 941 size = vring_size(vring->num, vring->align); 942 memset(vring->va, 0, size); 943 vq = vring_new_virtqueue(i, vring->num, vring->align, vdev, 944 false, false, vring->va, 945 mlxbf_tmfifo_virtio_notify, 946 callbacks[i], names[i]); 947 if (!vq) { 948 dev_err(&vdev->dev, "vring_new_virtqueue failed\n"); 949 ret = -ENOMEM; 950 goto error; 951 } 952 953 vqs[i] = vq; 954 vring->vq = vq; 955 vq->priv = vring; 956 } 957 958 return 0; 959 960 error: 961 mlxbf_tmfifo_virtio_del_vqs(vdev); 962 return ret; 963 } 964 965 /* Read the status byte. */ 966 static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev) 967 { 968 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 969 970 return tm_vdev->status; 971 } 972 973 /* Write the status byte. */ 974 static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev, 975 u8 status) 976 { 977 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 978 979 tm_vdev->status = status; 980 } 981 982 /* Reset the device. Not much here for now. */ 983 static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev) 984 { 985 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 986 987 tm_vdev->status = 0; 988 } 989 990 /* Read the value of a configuration field. 
*/ 991 static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev, 992 unsigned int offset, 993 void *buf, 994 unsigned int len) 995 { 996 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 997 998 if ((u64)offset + len > sizeof(tm_vdev->config)) 999 return; 1000 1001 memcpy(buf, (u8 *)&tm_vdev->config + offset, len); 1002 } 1003 1004 /* Write the value of a configuration field. */ 1005 static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev, 1006 unsigned int offset, 1007 const void *buf, 1008 unsigned int len) 1009 { 1010 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 1011 1012 if ((u64)offset + len > sizeof(tm_vdev->config)) 1013 return; 1014 1015 memcpy((u8 *)&tm_vdev->config + offset, buf, len); 1016 } 1017 1018 static void tmfifo_virtio_dev_release(struct device *device) 1019 { 1020 struct virtio_device *vdev = 1021 container_of(device, struct virtio_device, dev); 1022 struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev); 1023 1024 kfree(tm_vdev); 1025 } 1026 1027 /* Virtio config operations. */ 1028 static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = { 1029 .get_features = mlxbf_tmfifo_virtio_get_features, 1030 .finalize_features = mlxbf_tmfifo_virtio_finalize_features, 1031 .find_vqs = mlxbf_tmfifo_virtio_find_vqs, 1032 .del_vqs = mlxbf_tmfifo_virtio_del_vqs, 1033 .reset = mlxbf_tmfifo_virtio_reset, 1034 .set_status = mlxbf_tmfifo_virtio_set_status, 1035 .get_status = mlxbf_tmfifo_virtio_get_status, 1036 .get = mlxbf_tmfifo_virtio_get, 1037 .set = mlxbf_tmfifo_virtio_set, 1038 }; 1039 1040 /* Create vdev for the FIFO. 
*/ 1041 static int mlxbf_tmfifo_create_vdev(struct device *dev, 1042 struct mlxbf_tmfifo *fifo, 1043 int vdev_id, u64 features, 1044 void *config, u32 size) 1045 { 1046 struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL; 1047 int ret; 1048 1049 mutex_lock(&fifo->lock); 1050 1051 tm_vdev = fifo->vdev[vdev_id]; 1052 if (tm_vdev) { 1053 dev_err(dev, "vdev %d already exists\n", vdev_id); 1054 ret = -EEXIST; 1055 goto fail; 1056 } 1057 1058 tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL); 1059 if (!tm_vdev) { 1060 ret = -ENOMEM; 1061 goto fail; 1062 } 1063 1064 tm_vdev->vdev.id.device = vdev_id; 1065 tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops; 1066 tm_vdev->vdev.dev.parent = dev; 1067 tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release; 1068 tm_vdev->features = features; 1069 if (config) 1070 memcpy(&tm_vdev->config, config, size); 1071 1072 if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) { 1073 dev_err(dev, "unable to allocate vring\n"); 1074 ret = -ENOMEM; 1075 goto vdev_fail; 1076 } 1077 1078 /* Allocate an output buffer for the console device. */ 1079 if (vdev_id == VIRTIO_ID_CONSOLE) 1080 tm_vdev->tx_buf.buf = devm_kmalloc(dev, 1081 MLXBF_TMFIFO_CON_TX_BUF_SIZE, 1082 GFP_KERNEL); 1083 fifo->vdev[vdev_id] = tm_vdev; 1084 1085 /* Register the virtio device. */ 1086 ret = register_virtio_device(&tm_vdev->vdev); 1087 reg_dev = tm_vdev; 1088 if (ret) { 1089 dev_err(dev, "register_virtio_device failed\n"); 1090 goto vdev_fail; 1091 } 1092 1093 mutex_unlock(&fifo->lock); 1094 return 0; 1095 1096 vdev_fail: 1097 mlxbf_tmfifo_free_vrings(fifo, tm_vdev); 1098 fifo->vdev[vdev_id] = NULL; 1099 if (reg_dev) 1100 put_device(&tm_vdev->vdev.dev); 1101 else 1102 kfree(tm_vdev); 1103 fail: 1104 mutex_unlock(&fifo->lock); 1105 return ret; 1106 } 1107 1108 /* Delete vdev for the FIFO. 
*/
static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
{
	struct mlxbf_tmfifo_vdev *tm_vdev;

	mutex_lock(&fifo->lock);

	/* Unregister vdev. */
	tm_vdev = fifo->vdev[vdev_id];
	if (tm_vdev) {
		unregister_virtio_device(&tm_vdev->vdev);
		mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
		fifo->vdev[vdev_id] = NULL;
	}

	mutex_unlock(&fifo->lock);

	return 0;
}

/* Read the configured network MAC address from efi variable. */
static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
{
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	unsigned long size = ETH_ALEN;
	u8 buf[ETH_ALEN];
	efi_status_t rc;

	/* Fall back to the default MAC if the EFI variable is absent or bad. */
	rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
	if (rc == EFI_SUCCESS && size == ETH_ALEN)
		ether_addr_copy(mac, buf);
	else
		ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
}

/* Set TmFifo thresholds which are used to trigger interrupts. */
static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
{
	u64 ctl;

	/* Get Tx FIFO size and set the low/high watermark. */
	ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
	fifo->tx_fifo_size =
		FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
			   fifo->tx_fifo_size / 2);
	ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
			   fifo->tx_fifo_size - 1);
	writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);

	/* Get Rx FIFO size and set the low/high watermark.
*/
	ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
	fifo->rx_fifo_size =
		FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
	ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
		FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
	writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
}

/* Tear down everything probe set up: timer, IRQs, work and all vdevs. */
static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
{
	int i;

	/* Mark not-ready first so no new FIFO activity is started. */
	fifo->is_ready = false;
	del_timer_sync(&fifo->timer);
	mlxbf_tmfifo_disable_irqs(fifo);
	cancel_work_sync(&fifo->work);
	for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
		mlxbf_tmfifo_delete_vdev(fifo, i);
}

/* Probe the TMFIFO. */
static int mlxbf_tmfifo_probe(struct platform_device *pdev)
{
	struct virtio_net_config net_config;
	struct device *dev = &pdev->dev;
	struct mlxbf_tmfifo *fifo;
	int i, rc;

	fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	/* One spinlock per direction (Rx/Tx). */
	spin_lock_init(&fifo->spin_lock[0]);
	spin_lock_init(&fifo->spin_lock[1]);
	INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
	mutex_init(&fifo->lock);

	/* Get the resource of the Rx FIFO. */
	fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fifo->rx_base))
		return PTR_ERR(fifo->rx_base);

	/* Get the resource of the Tx FIFO.
*/ 1206 fifo->tx_base = devm_platform_ioremap_resource(pdev, 1); 1207 if (IS_ERR(fifo->tx_base)) 1208 return PTR_ERR(fifo->tx_base); 1209 1210 platform_set_drvdata(pdev, fifo); 1211 1212 timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0); 1213 1214 for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) { 1215 fifo->irq_info[i].index = i; 1216 fifo->irq_info[i].fifo = fifo; 1217 fifo->irq_info[i].irq = platform_get_irq(pdev, i); 1218 rc = devm_request_irq(dev, fifo->irq_info[i].irq, 1219 mlxbf_tmfifo_irq_handler, 0, 1220 "tmfifo", &fifo->irq_info[i]); 1221 if (rc) { 1222 dev_err(dev, "devm_request_irq failed\n"); 1223 fifo->irq_info[i].irq = 0; 1224 return rc; 1225 } 1226 } 1227 1228 mlxbf_tmfifo_set_threshold(fifo); 1229 1230 /* Create the console vdev. */ 1231 rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0); 1232 if (rc) 1233 goto fail; 1234 1235 /* Create the network vdev. */ 1236 memset(&net_config, 0, sizeof(net_config)); 1237 1238 /* A legacy-only interface for now. */ 1239 net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(), 1240 ETH_DATA_LEN); 1241 net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(), 1242 VIRTIO_NET_S_LINK_UP); 1243 mlxbf_tmfifo_get_cfg_mac(net_config.mac); 1244 rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET, 1245 MLXBF_TMFIFO_NET_FEATURES, &net_config, 1246 sizeof(net_config)); 1247 if (rc) 1248 goto fail; 1249 1250 mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL); 1251 1252 fifo->is_ready = true; 1253 return 0; 1254 1255 fail: 1256 mlxbf_tmfifo_cleanup(fifo); 1257 return rc; 1258 } 1259 1260 /* Device remove function. 
*/
static int mlxbf_tmfifo_remove(struct platform_device *pdev)
{
	struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);

	/* Stop the timer, IRQs and work, and unregister all vdevs. */
	mlxbf_tmfifo_cleanup(fifo);

	return 0;
}

/* ACPI ID the BlueField firmware exposes for this device. */
static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
	{ "MLNXBF01", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);

static struct platform_driver mlxbf_tmfifo_driver = {
	.probe = mlxbf_tmfifo_probe,
	.remove = mlxbf_tmfifo_remove,
	.driver = {
		.name = "bf-tmfifo",
		.acpi_match_table = mlxbf_tmfifo_acpi_match,
	},
};

module_platform_driver(mlxbf_tmfifo_driver);

MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");