// SPDX-License-Identifier: GPL-2.0+
/*
 * Mellanox BlueField SoC TmFifo driver
 *
 * Copyright (C) 2019 Mellanox Technologies
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/efi.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/types.h>

#include <linux/virtio_config.h>
#include <linux/virtio_console.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>

#include "mlxbf-tmfifo-regs.h"
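/*
 * Overview (summary of the implementation below): a single hardware TmFifo
 * is shared by the virtio console and network devices. Every 8-byte word
 * moved through the FIFO is either a message header, which carries the
 * virtio device id and the payload length, or payload data. The Rx path
 * demultiplexes incoming packets onto the matching device vring based on
 * the header type.
 */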
/* Vring size. */
#define MLXBF_TMFIFO_VRING_SIZE			SZ_1K

/* Console Tx buffer size. */
#define MLXBF_TMFIFO_CON_TX_BUF_SIZE		SZ_32K

/* Console Tx buffer reserved space. */
#define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE	8

/* House-keeping timer interval. */
#define MLXBF_TMFIFO_TIMER_INTERVAL		(HZ / 10)

/* Virtual devices sharing the TM FIFO. */
#define MLXBF_TMFIFO_VDEV_MAX			(VIRTIO_ID_CONSOLE + 1)

/*
 * Reserve 1/16 of TmFifo space, so console messages are not starved by
 * the networking traffic.
 */
#define MLXBF_TMFIFO_RESERVE_RATIO		16

/* A message with data needs at least two words (for the header & data). */
#define MLXBF_TMFIFO_DATA_MIN_WORDS		2

/* ACPI UID for BlueField-3. */
#define TMFIFO_BF3_UID				1

struct mlxbf_tmfifo;

/**
 * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
 * @va: virtual address of the ring
 * @dma: dma address of the ring
 * @vq: pointer to the virtio virtqueue
 * @desc: current descriptor of the pending packet
 * @desc_head: head descriptor of the pending packet
 * @drop_desc: dummy descriptor for packet dropping
 * @cur_len: processed length of the current descriptor
 * @rem_len: remaining length of the pending packet
 * @pkt_len: total length of the pending packet
 * @next_avail: next available descriptor id
 * @num: vring size (number of descriptors)
 * @align: vring alignment size
 * @index: vring index
 * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
 * @fifo: pointer to the tmfifo structure
 */
struct mlxbf_tmfifo_vring {
        void *va;
        dma_addr_t dma;
        struct virtqueue *vq;
        struct vring_desc *desc;
        struct vring_desc *desc_head;
        struct vring_desc drop_desc;
        int cur_len;
        int rem_len;
        u32 pkt_len;
        u16 next_avail;
        int num;
        int align;
        int index;
        int vdev_id;
        struct mlxbf_tmfifo *fifo;
};

/* Check whether the vring is in drop mode. */
#define IS_VRING_DROP(_r) ({ \
        typeof(_r) (r) = (_r); \
        (r->desc_head == &r->drop_desc ? true : false); })

/* A stub length to drop a maximum-length packet. */
#define VRING_DROP_DESC_MAX_LEN		GENMASK(15, 0)

/* Interrupt types. */
enum {
        MLXBF_TM_RX_LWM_IRQ,
        MLXBF_TM_RX_HWM_IRQ,
        MLXBF_TM_TX_LWM_IRQ,
        MLXBF_TM_TX_HWM_IRQ,
        MLXBF_TM_MAX_IRQ
};

/* Ring types (Rx & Tx). */
enum {
        MLXBF_TMFIFO_VRING_RX,
        MLXBF_TMFIFO_VRING_TX,
        MLXBF_TMFIFO_VRING_MAX
};

/**
 * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
 * @vdev: virtio device, in which the vdev.id.device field has the
 *        VIRTIO_ID_xxx id to distinguish the virtual device.
 * @status: status of the device
 * @features: supported features of the device
 * @vrings: array of tmfifo vrings of this device
 * @config: non-anonymous union for cons and net
 * @config.cons: virtual console config -
 *               select if vdev.id.device is VIRTIO_ID_CONSOLE
 * @config.net: virtual network config -
 *              select if vdev.id.device is VIRTIO_ID_NET
 * @tx_buf: tx buffer used to buffer data before writing into the FIFO
 */
struct mlxbf_tmfifo_vdev {
        struct virtio_device vdev;
        u8 status;
        u64 features;
        struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
        union {
                struct virtio_console_config cons;
                struct virtio_net_config net;
        } config;
        struct circ_buf tx_buf;
};

/**
 * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
 * @fifo: pointer to the tmfifo structure
 * @irq: interrupt number
 * @index: index into the interrupt array
 */
struct mlxbf_tmfifo_irq_info {
        struct mlxbf_tmfifo *fifo;
        int irq;
        int index;
};

/**
 * struct mlxbf_tmfifo_io - Structure of the TmFifo IO resource (for both rx & tx)
 * @ctl: control register offset (TMFIFO_RX_CTL / TMFIFO_TX_CTL)
 * @sts: status register offset (TMFIFO_RX_STS / TMFIFO_TX_STS)
 * @data: data register offset (TMFIFO_RX_DATA / TMFIFO_TX_DATA)
 */
struct mlxbf_tmfifo_io {
        void __iomem *ctl;
        void __iomem *sts;
        void __iomem *data;
};

/**
 * struct mlxbf_tmfifo - Structure of the TmFifo
 * @vdev: array of the virtual devices running over the TmFifo
 * @lock: lock to protect the TmFifo access
 * @res0: mapped resource block 0
 * @res1: mapped resource block 1
 * @rx: rx io resource
 * @tx: tx io resource
 * @rx_fifo_size: number of entries of the Rx FIFO
 * @tx_fifo_size: number of entries of the Tx FIFO
 * @pend_events: pending bits for deferred events
 * @irq_info: interrupt information
 * @work: work struct for deferred process
 * @timer: background timer
 * @vring: Tx/Rx ring
 * @spin_lock: Tx/Rx spin lock
 * @is_ready: ready flag
 */
struct mlxbf_tmfifo {
        struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
        struct mutex lock;		/* TmFifo lock */
        void __iomem *res0;
        void __iomem *res1;
        struct mlxbf_tmfifo_io rx;
        struct mlxbf_tmfifo_io tx;
        int rx_fifo_size;
        int tx_fifo_size;
        unsigned long pend_events;
        struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
        struct work_struct work;
        struct timer_list timer;
        struct mlxbf_tmfifo_vring *vring[2];
        spinlock_t spin_lock[2];	/* Tx/Rx spin lock */
        bool is_ready;
};

/**
 * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
 * @type: message type
 * @len: payload length in network byte order. Messages sent into the FIFO
 *       are read by the other side as a byte stream in the same order, so
 *       the length is encoded in network byte order for both sides to
 *       understand it.
 */
struct mlxbf_tmfifo_msg_hdr {
        u8 type;
        __be16 len;
        /* private: */
        u8 unused[5];
} __packed __aligned(sizeof(u64));
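/*
 * Header word layout on the wire (illustrative, derived from the struct
 * above): byte 0 holds the type (VIRTIO_ID_CONSOLE or VIRTIO_ID_NET),
 * bytes 1-2 hold the big-endian payload length, and bytes 3-7 are padding,
 * so a header occupies exactly one 8-byte FIFO word.
 */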
/*
 * Default MAC.
 * This MAC address will be read from EFI persistent variable if configured.
 * It can also be reconfigured with standard Linux tools.
 */
static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
        0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
};

/* EFI variable name of the MAC address. */
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";

/* Maximum L2 header length. */
#define MLXBF_TMFIFO_NET_L2_OVERHEAD	(ETH_HLEN + VLAN_HLEN)

/* Supported virtio-net features. */
#define MLXBF_TMFIFO_NET_FEATURES \
        (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
         BIT_ULL(VIRTIO_NET_F_MAC))

#define mlxbf_vdev_to_tmfifo(d)	container_of(d, struct mlxbf_tmfifo_vdev, vdev)

/* Free vrings of the FIFO device. */
static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
                                     struct mlxbf_tmfifo_vdev *tm_vdev)
{
        struct mlxbf_tmfifo_vring *vring;
        int i, size;

        for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
                vring = &tm_vdev->vrings[i];
                if (vring->va) {
                        size = vring_size(vring->num, vring->align);
                        dma_free_coherent(tm_vdev->vdev.dev.parent, size,
                                          vring->va, vring->dma);
                        vring->va = NULL;
                        if (vring->vq) {
                                vring_del_virtqueue(vring->vq);
                                vring->vq = NULL;
                        }
                }
        }
}

/* Allocate vrings for the FIFO. */
static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
                                     struct mlxbf_tmfifo_vdev *tm_vdev)
{
        struct mlxbf_tmfifo_vring *vring;
        struct device *dev;
        dma_addr_t dma;
        int i, size;
        void *va;

        for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
                vring = &tm_vdev->vrings[i];
                vring->fifo = fifo;
                vring->num = MLXBF_TMFIFO_VRING_SIZE;
                vring->align = SMP_CACHE_BYTES;
                vring->index = i;
                vring->vdev_id = tm_vdev->vdev.id.device;
                vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN;
                dev = &tm_vdev->vdev.dev;

                size = vring_size(vring->num, vring->align);
                va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
                if (!va) {
                        mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
                        dev_err(dev->parent, "dma_alloc_coherent failed\n");
                        return -ENOMEM;
                }

                vring->va = va;
                vring->dma = dma;
        }

        return 0;
}

/* Disable interrupts of the FIFO device. */
static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
{
        int i, irq;

        for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
                irq = fifo->irq_info[i].irq;
                fifo->irq_info[i].irq = 0;
                disable_irq(irq);
        }
}

/* Interrupt handler. */
static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
{
        struct mlxbf_tmfifo_irq_info *irq_info = arg;

        if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
                schedule_work(&irq_info->fifo->work);

        return IRQ_HANDLED;
}

/* Get the next packet descriptor from the vring. */
static struct vring_desc *
mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
{
        const struct vring *vr = virtqueue_get_vring(vring->vq);
        struct virtio_device *vdev = vring->vq->vdev;
        unsigned int idx, head;

        if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
                return NULL;

        /* Make sure 'avail->idx' is visible already. */
        virtio_rmb(false);

        idx = vring->next_avail % vr->num;
        head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
        if (WARN_ON(head >= vr->num))
                return NULL;

        vring->next_avail++;

        return &vr->desc[head];
}
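/*
 * Note (summary of the helper above): 'next_avail' is the driver-side
 * cursor into the avail ring. It only ever catches up to 'avail->idx',
 * so each posted descriptor chain is consumed exactly once, and the read
 * barrier orders the 'avail->idx' check against reading the ring entry.
 */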
/* Release the virtio descriptor. */
static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
                                      struct vring_desc *desc, u32 len)
{
        const struct vring *vr = virtqueue_get_vring(vring->vq);
        struct virtio_device *vdev = vring->vq->vdev;
        u16 idx, vr_idx;

        vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
        idx = vr_idx % vr->num;
        vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
        vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);

        /*
         * Virtio could poll and check the 'idx' to decide whether the desc is
         * done or not. Add a memory barrier here to make sure the update above
         * completes before updating the idx.
         */
        virtio_mb(false);
        vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}

/* Get the total length of the descriptor chain. */
static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
                                    struct vring_desc *desc)
{
        const struct vring *vr = virtqueue_get_vring(vring->vq);
        struct virtio_device *vdev = vring->vq->vdev;
        u32 len = 0, idx;

        while (desc) {
                len += virtio32_to_cpu(vdev, desc->len);
                if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
                        break;
                idx = virtio16_to_cpu(vdev, desc->next);
                desc = &vr->desc[idx];
        }

        return len;
}

/* Release the current pending packet of the vring. */
static void mlxbf_tmfifo_release_pkt(struct mlxbf_tmfifo_vring *vring)
{
        struct vring_desc *desc_head;
        u32 len = 0;

        if (vring->desc_head) {
                desc_head = vring->desc_head;
                len = vring->pkt_len;
        } else {
                desc_head = mlxbf_tmfifo_get_next_desc(vring);
                len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
        }

        if (desc_head)
                mlxbf_tmfifo_release_desc(vring, desc_head, len);

        vring->pkt_len = 0;
        vring->desc = NULL;
        vring->desc_head = NULL;
}

/* Initialize (zero) the virtio-net header of an Rx descriptor. */
static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
                                       struct vring_desc *desc, bool is_rx)
{
        struct virtio_device *vdev = vring->vq->vdev;
        struct virtio_net_hdr *net_hdr;

        net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
        memset(net_hdr, 0, sizeof(*net_hdr));
}

/* Get and initialize the next packet. */
static struct vring_desc *
mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
        struct vring_desc *desc;

        desc = mlxbf_tmfifo_get_next_desc(vring);
        if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
                mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);

        vring->desc_head = desc;
        vring->desc = desc;

        return desc;
}

/* House-keeping timer. */
static void mlxbf_tmfifo_timer(struct timer_list *t)
{
        struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
        int rx, tx;

        rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
        tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);

        if (rx || tx)
                schedule_work(&fifo->work);

        mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
}
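/*
 * Note (illustrative): the timer above acts as a polling fallback. It
 * re-arms every HZ/10 jiffies and sets the Rx/Tx pending bits itself, so
 * the worker still drains the FIFO even if a watermark interrupt is
 * missed.
 */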
/* Copy one console packet into the output buffer. */
static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
                                            struct mlxbf_tmfifo_vring *vring,
                                            struct vring_desc *desc)
{
        const struct vring *vr = virtqueue_get_vring(vring->vq);
        struct virtio_device *vdev = &cons->vdev;
        u32 len, idx, seg;
        void *addr;

        while (desc) {
                addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
                len = virtio32_to_cpu(vdev, desc->len);

                seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
                                        MLXBF_TMFIFO_CON_TX_BUF_SIZE);
                if (len <= seg) {
                        memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
                } else {
                        memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
                        addr += seg;
                        memcpy(cons->tx_buf.buf, addr, len - seg);
                }
                cons->tx_buf.head = (cons->tx_buf.head + len) %
                        MLXBF_TMFIFO_CON_TX_BUF_SIZE;

                if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
                        break;
                idx = virtio16_to_cpu(vdev, desc->next);
                desc = &vr->desc[idx];
        }
}

/* Copy console data into the output buffer. */
static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
                                        struct mlxbf_tmfifo_vring *vring)
{
        struct vring_desc *desc;
        u32 len, avail;

        desc = mlxbf_tmfifo_get_next_desc(vring);
        while (desc) {
                /* Release the packet if not enough space. */
                len = mlxbf_tmfifo_get_pkt_len(vring, desc);
                avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
                                   MLXBF_TMFIFO_CON_TX_BUF_SIZE);
                if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
                        mlxbf_tmfifo_release_desc(vring, desc, len);
                        break;
                }

                mlxbf_tmfifo_console_output_one(cons, vring, desc);
                mlxbf_tmfifo_release_desc(vring, desc, len);
                desc = mlxbf_tmfifo_get_next_desc(vring);
        }
}

/* Get the number of available words in the Rx FIFO for receiving. */
static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
{
        u64 sts;

        sts = readq(fifo->rx.sts);
        return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
}

/* Get the number of available words in the TmFifo for sending. */
static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
{
        int tx_reserve;
        u32 count;
        u64 sts;

        /* Reserve some room in the FIFO for console messages. */
        if (vdev_id == VIRTIO_ID_NET)
                tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
        else
                tx_reserve = 1;

        sts = readq(fifo->tx.sts);
        count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
        return fifo->tx_fifo_size - tx_reserve - count;
}
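/*
 * Worked example (illustrative, not from the hardware spec): with a
 * 256-entry Tx FIFO, the network path leaves 256 / 16 = 16 words unused
 * (MLXBF_TMFIFO_RESERVE_RATIO), i.e. it may fill at most 256 - 16 - count
 * words, so console output is never starved; the console itself reserves
 * only a single word.
 */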
/* Console Tx (move data from the output buffer into the TmFifo). */
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
{
        struct mlxbf_tmfifo_msg_hdr hdr;
        struct mlxbf_tmfifo_vdev *cons;
        unsigned long flags;
        int size, seg;
        void *addr;
        u64 data;

        /* Return if not enough space available. */
        if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
                return;

        cons = fifo->vdev[VIRTIO_ID_CONSOLE];
        if (!cons || !cons->tx_buf.buf)
                return;

        /* Return if no data to send. */
        size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
                        MLXBF_TMFIFO_CON_TX_BUF_SIZE);
        if (size == 0)
                return;

        /* Adjust the size to the available space. */
        if (size + sizeof(hdr) > avail * sizeof(u64))
                size = avail * sizeof(u64) - sizeof(hdr);

        /* Write the header. */
        hdr.type = VIRTIO_ID_CONSOLE;
        hdr.len = htons(size);
        writeq(*(u64 *)&hdr, fifo->tx.data);

        /* Use the spin-lock to protect 'cons->tx_buf'. */
        spin_lock_irqsave(&fifo->spin_lock[0], flags);

        while (size > 0) {
                addr = cons->tx_buf.buf + cons->tx_buf.tail;

                seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
                                      MLXBF_TMFIFO_CON_TX_BUF_SIZE);
                if (seg >= sizeof(u64)) {
                        memcpy(&data, addr, sizeof(u64));
                } else {
                        memcpy(&data, addr, seg);
                        memcpy((u8 *)&data + seg, cons->tx_buf.buf,
                               sizeof(u64) - seg);
                }
                writeq(data, fifo->tx.data);

                if (size >= sizeof(u64)) {
                        cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
                                MLXBF_TMFIFO_CON_TX_BUF_SIZE;
                        size -= sizeof(u64);
                } else {
                        cons->tx_buf.tail = (cons->tx_buf.tail + size) %
                                MLXBF_TMFIFO_CON_TX_BUF_SIZE;
                        size = 0;
                }
        }

        spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}

/* Rx/Tx one word in the descriptor buffer. */
static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
                                   struct vring_desc *desc,
                                   bool is_rx, int len)
{
        struct virtio_device *vdev = vring->vq->vdev;
        struct mlxbf_tmfifo *fifo = vring->fifo;
        void *addr;
        u64 data;

        /* Get the buffer address of this desc. */
        addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));

        /* Read a word from the FIFO for Rx. */
        if (is_rx)
                data = readq(fifo->rx.data);

        if (vring->cur_len + sizeof(u64) <= len) {
                /* The whole word. */
                if (is_rx) {
                        if (!IS_VRING_DROP(vring))
                                memcpy(addr + vring->cur_len, &data,
                                       sizeof(u64));
                } else {
                        memcpy(&data, addr + vring->cur_len,
                               sizeof(u64));
                }
                vring->cur_len += sizeof(u64);
        } else {
                /* Leftover bytes. */
                if (is_rx) {
                        if (!IS_VRING_DROP(vring))
                                memcpy(addr + vring->cur_len, &data,
                                       len - vring->cur_len);
                } else {
                        data = 0;
                        memcpy(&data, addr + vring->cur_len,
                               len - vring->cur_len);
                }
                vring->cur_len = len;
        }

        /* Write the word into the FIFO for Tx. */
        if (!is_rx)
                writeq(data, fifo->tx.data);
}

/*
 * Rx/Tx the packet header.
 *
 * In the Rx case, the packet might be found to belong to a different vring
 * since the TmFifo is shared by different services. In such a case, the
 * 'vring_change' flag is set.
 */
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
                                     struct vring_desc **desc,
                                     bool is_rx, bool *vring_change)
{
        struct mlxbf_tmfifo *fifo = vring->fifo;
        struct virtio_net_config *config;
        struct mlxbf_tmfifo_msg_hdr hdr;
        int vdev_id, hdr_len;
        bool drop_rx = false;

        /* Read/Write the packet header. */
        if (is_rx) {
                /* Drain one word from the FIFO. */
                *(u64 *)&hdr = readq(fifo->rx.data);

                /* Skip length-0 packets (keepalive). */
                if (hdr.len == 0)
                        return;

                /* Check the packet type. */
                if (hdr.type == VIRTIO_ID_NET) {
                        vdev_id = VIRTIO_ID_NET;
                        hdr_len = sizeof(struct virtio_net_hdr);
                        config = &fifo->vdev[vdev_id]->config.net;
                        /* A legacy-only interface for now. */
                        if (ntohs(hdr.len) >
                            __virtio16_to_cpu(virtio_legacy_is_little_endian(),
                                              config->mtu) +
                            MLXBF_TMFIFO_NET_L2_OVERHEAD)
                                drop_rx = true;
                } else {
                        vdev_id = VIRTIO_ID_CONSOLE;
                        hdr_len = 0;
                }

                /*
                 * Check whether the new packet still belongs to this vring.
                 * If not, update the pkt_len of the new vring.
                 */
                if (vdev_id != vring->vdev_id) {
                        struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];

                        if (!tm_dev2)
                                return;
                        vring->desc = *desc;
                        vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
                        *vring_change = true;
                }

                if (drop_rx && !IS_VRING_DROP(vring)) {
                        if (vring->desc_head)
                                mlxbf_tmfifo_release_pkt(vring);
                        *desc = &vring->drop_desc;
                        vring->desc_head = *desc;
                        vring->desc = *desc;
                }

                vring->pkt_len = ntohs(hdr.len) + hdr_len;
        } else {
                /* Network virtio has an extra header. */
                hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
                           sizeof(struct virtio_net_hdr) : 0;
                vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, *desc);
                hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
                            VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
                hdr.len = htons(vring->pkt_len - hdr_len);
                writeq(*(u64 *)&hdr, fifo->tx.data);
        }

        vring->cur_len = hdr_len;
        vring->rem_len = vring->pkt_len;
        fifo->vring[is_rx] = vring;
}

/*
 * Note on drop mode (summary of the Rx logic here and below): when an Rx
 * packet is oversized or no receive descriptor is available, 'desc_head'
 * is pointed at the per-vring 'drop_desc', so the FIFO words are still
 * drained but the payload is discarded instead of copied into a buffer.
 */
/*
 * Rx/Tx one descriptor.
 *
 * Return true to indicate more data available.
 */
static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
                                       bool is_rx, int *avail)
{
        const struct vring *vr = virtqueue_get_vring(vring->vq);
        struct mlxbf_tmfifo *fifo = vring->fifo;
        struct virtio_device *vdev;
        bool vring_change = false;
        struct vring_desc *desc;
        unsigned long flags;
        u32 len, idx;

        vdev = &fifo->vdev[vring->vdev_id]->vdev;

        /* Get the descriptor of the next packet. */
        if (!vring->desc) {
                desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
                if (!desc) {
                        /* Drop the next Rx packet to avoid getting stuck. */
                        if (is_rx) {
                                desc = &vring->drop_desc;
                                vring->desc_head = desc;
                                vring->desc = desc;
                        } else {
                                return false;
                        }
                }
        } else {
                desc = vring->desc;
        }

        /* Beginning of a packet. Start to Rx/Tx the packet header. */
        if (vring->pkt_len == 0) {
                mlxbf_tmfifo_rxtx_header(vring, &desc, is_rx, &vring_change);
                (*avail)--;

                /* Return if the new packet is for another ring. */
                if (vring_change)
                        return false;
                goto mlxbf_tmfifo_desc_done;
        }

        /* Get the length of this desc. */
        len = virtio32_to_cpu(vdev, desc->len);
        if (len > vring->rem_len)
                len = vring->rem_len;

        /* Rx/Tx one word (8 bytes) if not done. */
        if (vring->cur_len < len) {
                mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
                (*avail)--;
        }

        /* Check again whether it's done. */
        if (vring->cur_len == len) {
                vring->cur_len = 0;
                vring->rem_len -= len;

                /* Get the next desc on the chain. */
                if (!IS_VRING_DROP(vring) && vring->rem_len > 0 &&
                    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
                        idx = virtio16_to_cpu(vdev, desc->next);
                        desc = &vr->desc[idx];
                        goto mlxbf_tmfifo_desc_done;
                }

                /* Done; release the packet. */
                desc = NULL;
                fifo->vring[is_rx] = NULL;
                if (!IS_VRING_DROP(vring)) {
                        mlxbf_tmfifo_release_pkt(vring);
                } else {
                        vring->pkt_len = 0;
                        vring->desc_head = NULL;
                        vring->desc = NULL;
                        return false;
                }

                /*
                 * Make sure the load/store operations are in order before
                 * returning to virtio.
                 */
                virtio_mb(false);

                /* Notify the upper layer that the packet is done. */
                spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
                vring_interrupt(0, vring->vq);
                spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
        }

mlxbf_tmfifo_desc_done:
        /* Save the current desc. */
        vring->desc = desc;

        return true;
}

/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
        int avail = 0, devid = vring->vdev_id;
        struct mlxbf_tmfifo *fifo;
        bool more;

        fifo = vring->fifo;

        /* Return if the vdev is not ready. */
        if (!fifo || !fifo->vdev[devid])
                return;

        /* Return if another vring is running. */
        if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
                return;

        /* Only handle console and network for now. */
        if (WARN_ON(devid != VIRTIO_ID_NET && devid != VIRTIO_ID_CONSOLE))
                return;

        do {
                /* Get the available FIFO space. */
                if (avail == 0) {
                        if (is_rx)
                                avail = mlxbf_tmfifo_get_rx_avail(fifo);
                        else
                                avail = mlxbf_tmfifo_get_tx_avail(fifo, devid);
                        if (avail <= 0)
                                break;
                }

                /* Console output always comes from the Tx buffer. */
                if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
                        mlxbf_tmfifo_console_tx(fifo, avail);
                        break;
                }

                /* Handle one descriptor. */
                more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
        } while (more);
}

/* Handle the Rx or Tx queues. */
static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
                                   int irq_id, bool is_rx)
{
        struct mlxbf_tmfifo_vdev *tm_vdev;
        struct mlxbf_tmfifo_vring *vring;
        int i;

        if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
            !fifo->irq_info[irq_id].irq)
                return;

        for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++) {
                tm_vdev = fifo->vdev[i];
                if (tm_vdev) {
                        vring = &tm_vdev->vrings[queue_id];
                        if (vring->vq)
                                mlxbf_tmfifo_rxtx(vring, is_rx);
                }
        }
}

/* Work handler for the Rx and Tx case. */
static void mlxbf_tmfifo_work_handler(struct work_struct *work)
{
        struct mlxbf_tmfifo *fifo;

        fifo = container_of(work, struct mlxbf_tmfifo, work);
        if (!fifo->is_ready)
                return;

        mutex_lock(&fifo->lock);

        /* Tx (send data to the TmFifo). */
        mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
                               MLXBF_TM_TX_LWM_IRQ, false);

        /* Rx (receive data from the TmFifo). */
        mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
                               MLXBF_TM_RX_HWM_IRQ, true);

        mutex_unlock(&fifo->lock);
}

/* The notify function is called when new buffers are posted. */
static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
{
        struct mlxbf_tmfifo_vring *vring = vq->priv;
        struct mlxbf_tmfifo_vdev *tm_vdev;
        struct mlxbf_tmfifo *fifo;
        unsigned long flags;

        fifo = vring->fifo;

        /*
         * Virtio maintains vrings in pairs: even-numbered rings for Rx
         * and odd-numbered rings for Tx.
         */
        if (vring->index & BIT(0)) {
                /*
                 * The console could make a blocking call with interrupts
                 * disabled. In that case, the vring needs to be served right
                 * away. For other cases, just set the TX LWM bit to start
                 * Tx in the worker handler.
                 */
                if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
                        spin_lock_irqsave(&fifo->spin_lock[0], flags);
                        tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
                        mlxbf_tmfifo_console_output(tm_vdev, vring);
                        spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
                        set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
                } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
                                            &fifo->pend_events)) {
                        return true;
                }
        } else {
                if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
                        return true;
        }

        schedule_work(&fifo->work);

        return true;
}

/* Get the feature bits for this device. */
static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        return tm_vdev->features;
}

/* Confirm the device features to use. */
static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        tm_vdev->features = vdev->features;

        return 0;
}

/* Free virtqueues found by find_vqs(). */
static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
        struct mlxbf_tmfifo_vring *vring;
        struct virtqueue *vq;
        int i;

        for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
                vring = &tm_vdev->vrings[i];

                /* Release the pending packet. */
                if (vring->desc)
                        mlxbf_tmfifo_release_pkt(vring);
                vq = vring->vq;
                if (vq) {
                        vring->vq = NULL;
                        vring_del_virtqueue(vq);
                }
        }
}

/* Create and initialize the virtual queues. */
static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
                                        unsigned int nvqs,
                                        struct virtqueue *vqs[],
                                        vq_callback_t *callbacks[],
                                        const char * const names[],
                                        const bool *ctx,
                                        struct irq_affinity *desc)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
        struct mlxbf_tmfifo_vring *vring;
        struct virtqueue *vq;
        int i, ret, size;

        if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
                return -EINVAL;

        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        ret = -EINVAL;
                        goto error;
                }
                vring = &tm_vdev->vrings[i];

                /* Zero the vring. */
                size = vring_size(vring->num, vring->align);
                memset(vring->va, 0, size);
                vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
                                         false, false, vring->va,
                                         mlxbf_tmfifo_virtio_notify,
                                         callbacks[i], names[i]);
                if (!vq) {
                        dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
                        ret = -ENOMEM;
                        goto error;
                }

                vq->num_max = vring->num;

                vq->priv = vring;

                /* Make the vq update visible before using it. */
                virtio_mb(false);

                vqs[i] = vq;
                vring->vq = vq;
        }

        return 0;

error:
        mlxbf_tmfifo_virtio_del_vqs(vdev);
        return ret;
}

/* Read the status byte. */
static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        return tm_vdev->status;
}

/* Write the status byte. */
static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
                                           u8 status)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        tm_vdev->status = status;
}
/* Reset the device. Not much here for now. */
static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        tm_vdev->status = 0;
}

/* Read the value of a configuration field. */
static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
                                    unsigned int offset,
                                    void *buf,
                                    unsigned int len)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        if ((u64)offset + len > sizeof(tm_vdev->config))
                return;

        memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
}

/* Write the value of a configuration field. */
static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
                                    unsigned int offset,
                                    const void *buf,
                                    unsigned int len)
{
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        if ((u64)offset + len > sizeof(tm_vdev->config))
                return;

        memcpy((u8 *)&tm_vdev->config + offset, buf, len);
}
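/*
 * Note (derived from the two accessors above): 'offset' and 'len' are
 * validated against sizeof(tm_vdev->config) before the memcpy(), and the
 * cast to u64 keeps 'offset + len' from wrapping around, so a malformed
 * request cannot read or write past the config union.
 */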
static void tmfifo_virtio_dev_release(struct device *device)
{
        struct virtio_device *vdev =
                        container_of(device, struct virtio_device, dev);
        struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);

        kfree(tm_vdev);
}

/* Virtio config operations. */
static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
        .get_features = mlxbf_tmfifo_virtio_get_features,
        .finalize_features = mlxbf_tmfifo_virtio_finalize_features,
        .find_vqs = mlxbf_tmfifo_virtio_find_vqs,
        .del_vqs = mlxbf_tmfifo_virtio_del_vqs,
        .reset = mlxbf_tmfifo_virtio_reset,
        .set_status = mlxbf_tmfifo_virtio_set_status,
        .get_status = mlxbf_tmfifo_virtio_get_status,
        .get = mlxbf_tmfifo_virtio_get,
        .set = mlxbf_tmfifo_virtio_set,
};

/* Create a vdev for the FIFO. */
static int mlxbf_tmfifo_create_vdev(struct device *dev,
                                    struct mlxbf_tmfifo *fifo,
                                    int vdev_id, u64 features,
                                    void *config, u32 size)
{
        struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
        int ret;

        mutex_lock(&fifo->lock);

        tm_vdev = fifo->vdev[vdev_id];
        if (tm_vdev) {
                dev_err(dev, "vdev %d already exists\n", vdev_id);
                ret = -EEXIST;
                goto fail;
        }

        tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
        if (!tm_vdev) {
                ret = -ENOMEM;
                goto fail;
        }

        tm_vdev->vdev.id.device = vdev_id;
        tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
        tm_vdev->vdev.dev.parent = dev;
        tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
        tm_vdev->features = features;
        if (config)
                memcpy(&tm_vdev->config, config, size);

        if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
                dev_err(dev, "unable to allocate vring\n");
                ret = -ENOMEM;
                goto vdev_fail;
        }

        /* Allocate an output buffer for the console device. */
        if (vdev_id == VIRTIO_ID_CONSOLE)
                tm_vdev->tx_buf.buf = devm_kmalloc(dev,
                                                   MLXBF_TMFIFO_CON_TX_BUF_SIZE,
                                                   GFP_KERNEL);
        fifo->vdev[vdev_id] = tm_vdev;

        /* Register the virtio device. */
        ret = register_virtio_device(&tm_vdev->vdev);
        reg_dev = tm_vdev;
        if (ret) {
                dev_err(dev, "register_virtio_device failed\n");
                goto vdev_fail;
        }

        mutex_unlock(&fifo->lock);
        return 0;

vdev_fail:
        mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
        fifo->vdev[vdev_id] = NULL;
        if (reg_dev)
                put_device(&tm_vdev->vdev.dev);
        else
                kfree(tm_vdev);
fail:
        mutex_unlock(&fifo->lock);
        return ret;
}

/* Delete a vdev of the FIFO. */
static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
{
        struct mlxbf_tmfifo_vdev *tm_vdev;

        mutex_lock(&fifo->lock);

        /* Unregister the vdev. */
        tm_vdev = fifo->vdev[vdev_id];
        if (tm_vdev) {
                unregister_virtio_device(&tm_vdev->vdev);
                mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
                fifo->vdev[vdev_id] = NULL;
        }

        mutex_unlock(&fifo->lock);

        return 0;
}

/* Read the configured network MAC address from the EFI variable. */
static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
{
        efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
        unsigned long size = ETH_ALEN;
        u8 buf[ETH_ALEN];
        efi_status_t rc;

        rc = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL, &size, buf);
        if (rc == EFI_SUCCESS && size == ETH_ALEN)
                ether_addr_copy(mac, buf);
        else
                ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
}

/* Set the TmFifo thresholds that are used to trigger interrupts. */
static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
{
        u64 ctl;

        /* Get the Tx FIFO size and set the low/high watermark. */
        ctl = readq(fifo->tx.ctl);
        fifo->tx_fifo_size =
                FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
        ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
                FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
                           fifo->tx_fifo_size / 2);
        ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
                FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
                           fifo->tx_fifo_size - 1);
        writeq(ctl, fifo->tx.ctl);

        /* Get the Rx FIFO size and set the low/high watermark. */
        ctl = readq(fifo->rx.ctl);
        fifo->rx_fifo_size =
                FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
        ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
                FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
        ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
                FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
        writeq(ctl, fifo->rx.ctl);
}

/* Tear down the FIFO: stop the timer, IRQs and worker, delete the vdevs. */
static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
{
        int i;

        fifo->is_ready = false;
        del_timer_sync(&fifo->timer);
        mlxbf_tmfifo_disable_irqs(fifo);
        cancel_work_sync(&fifo->work);
        for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
                mlxbf_tmfifo_delete_vdev(fifo, i);
}
/* Probe the TmFifo. */
static int mlxbf_tmfifo_probe(struct platform_device *pdev)
{
        struct virtio_net_config net_config;
        struct device *dev = &pdev->dev;
        struct mlxbf_tmfifo *fifo;
        u64 dev_id;
        int i, rc;

        rc = acpi_dev_uid_to_integer(ACPI_COMPANION(dev), &dev_id);
        if (rc) {
                dev_err(dev, "Cannot retrieve UID\n");
                return rc;
        }

        fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
        if (!fifo)
                return -ENOMEM;

        spin_lock_init(&fifo->spin_lock[0]);
        spin_lock_init(&fifo->spin_lock[1]);
        INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
        mutex_init(&fifo->lock);

        /* Get the resource of the Rx FIFO. */
        fifo->res0 = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(fifo->res0))
                return PTR_ERR(fifo->res0);

        /* Get the resource of the Tx FIFO. */
        fifo->res1 = devm_platform_ioremap_resource(pdev, 1);
        if (IS_ERR(fifo->res1))
                return PTR_ERR(fifo->res1);

        /*
         * BlueField-3 keeps the control/status registers in resource 1 and
         * the data registers in resource 0; earlier chips keep all Rx
         * registers in resource 0 and all Tx registers in resource 1.
         */
        if (dev_id == TMFIFO_BF3_UID) {
                fifo->rx.ctl = fifo->res1 + MLXBF_TMFIFO_RX_CTL_BF3;
                fifo->rx.sts = fifo->res1 + MLXBF_TMFIFO_RX_STS_BF3;
                fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA_BF3;
                fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL_BF3;
                fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS_BF3;
                fifo->tx.data = fifo->res0 + MLXBF_TMFIFO_TX_DATA_BF3;
        } else {
                fifo->rx.ctl = fifo->res0 + MLXBF_TMFIFO_RX_CTL;
                fifo->rx.sts = fifo->res0 + MLXBF_TMFIFO_RX_STS;
                fifo->rx.data = fifo->res0 + MLXBF_TMFIFO_RX_DATA;
                fifo->tx.ctl = fifo->res1 + MLXBF_TMFIFO_TX_CTL;
                fifo->tx.sts = fifo->res1 + MLXBF_TMFIFO_TX_STS;
                fifo->tx.data = fifo->res1 + MLXBF_TMFIFO_TX_DATA;
        }

        platform_set_drvdata(pdev, fifo);

        timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);

        for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
                fifo->irq_info[i].index = i;
                fifo->irq_info[i].fifo = fifo;
                fifo->irq_info[i].irq = platform_get_irq(pdev, i);
                rc = devm_request_irq(dev, fifo->irq_info[i].irq,
                                      mlxbf_tmfifo_irq_handler, 0,
                                      "tmfifo", &fifo->irq_info[i]);
                if (rc) {
                        dev_err(dev, "devm_request_irq failed\n");
                        fifo->irq_info[i].irq = 0;
                        return rc;
                }
        }

        mlxbf_tmfifo_set_threshold(fifo);

        /* Create the console vdev. */
        rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
        if (rc)
                goto fail;

        /* Create the network vdev. */
        memset(&net_config, 0, sizeof(net_config));

        /* A legacy-only interface for now. */
        net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
                                           ETH_DATA_LEN);
        net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
                                              VIRTIO_NET_S_LINK_UP);
        mlxbf_tmfifo_get_cfg_mac(net_config.mac);
        rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
                                      MLXBF_TMFIFO_NET_FEATURES, &net_config,
                                      sizeof(net_config));
        if (rc)
                goto fail;

        mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);

        /* Make all updates visible before setting the 'is_ready' flag. */
        virtio_mb(false);

        fifo->is_ready = true;
        return 0;

fail:
        mlxbf_tmfifo_cleanup(fifo);
        return rc;
}
/* Device remove function. */
static int mlxbf_tmfifo_remove(struct platform_device *pdev)
{
        struct mlxbf_tmfifo *fifo = platform_get_drvdata(pdev);

        mlxbf_tmfifo_cleanup(fifo);

        return 0;
}

static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
        { "MLNXBF01", 0 },
        {}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);

static struct platform_driver mlxbf_tmfifo_driver = {
        .probe = mlxbf_tmfifo_probe,
        .remove = mlxbf_tmfifo_remove,
        .driver = {
                .name = "bf-tmfifo",
                .acpi_match_table = mlxbf_tmfifo_acpi_match,
        },
};

module_platform_driver(mlxbf_tmfifo_driver);

MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");