// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <asm/byteorder.h>
#include <linux/completion.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/overflow.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"

#define MANAGE_MAGIC_NUMBER		((__force __le32)0x43494151) /* "QAIC" in little endian */
#define QAIC_DBC_Q_GAP			SZ_256
#define QAIC_DBC_Q_BUF_ALIGN		SZ_4K
#define QAIC_MANAGE_EXT_MSG_LENGTH	SZ_64K /* Max DMA message length */
#define QAIC_WRAPPER_MAX_SIZE		SZ_4K
#define QAIC_MHI_RETRY_WAIT_MS		100
#define QAIC_MHI_RETRY_MAX		20

static unsigned int control_resp_timeout_s = 60; /* 60 sec default */
module_param(control_resp_timeout_s, uint, 0600);
MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");

struct manage_msg {
	u32 len;
	u32 count;
	u8 data[];
};

/*
 * Wire encoding structures for the manage protocol.
 * All fields are little endian on the wire.
 */
struct wire_msg_hdr {
	__le32 crc32; /* crc of everything following this field in the message */
	__le32 magic_number;
	__le32 sequence_number;
	__le32 len; /* length of this message */
	__le32 count; /* number of transactions in this message */
	__le32 handle; /* unique id to track the resources consumed */
	__le32 partition_id; /* partition id for the request (signed) */
	__le32 padding; /* must be 0 */
} __packed;

struct wire_msg {
	struct wire_msg_hdr hdr;
	u8 data[];
} __packed;

struct wire_trans_hdr {
	__le32 type;
	__le32 len;
} __packed;

/* Each message sent from the driver to the device is organized as a list of wrapper_msg */
struct wrapper_msg {
	struct list_head list;
	struct kref ref_count;
	u32 len; /* length of data to transfer */
	struct wrapper_list *head;
	union {
		struct wire_msg msg;
		struct wire_trans_hdr trans;
	};
};

struct wrapper_list {
	struct list_head list;
	spinlock_t lock; /* Protects the list state during additions and removals */
};

struct wire_trans_passthrough {
	struct wire_trans_hdr hdr;
	u8 data[];
} __packed;

struct wire_addr_size_pair {
	__le64 addr;
	__le64 size;
} __packed;

struct wire_trans_dma_xfer {
	struct wire_trans_hdr hdr;
	__le32 tag;
	__le32 count;
	__le32 dma_chunk_id;
	__le32 padding;
	struct wire_addr_size_pair data[];
} __packed;

/* Initiated by device to continue the DMA xfer of a large piece of data */
struct wire_trans_dma_xfer_cont {
	struct wire_trans_hdr hdr;
	__le32 dma_chunk_id;
	__le32 padding;
	__le64 xferred_size;
} __packed;

struct wire_trans_activate_to_dev {
	struct wire_trans_hdr hdr;
	__le64 req_q_addr;
	__le64 rsp_q_addr;
	__le32 req_q_size;
	__le32 rsp_q_size;
	__le32 buf_len;
	__le32 options; /* unused, but BIT(16) has meaning to the device */
} __packed;

struct wire_trans_activate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 dbc_id;
	__le64 options; /* unused */
} __packed;

struct wire_trans_deactivate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 dbc_id;
} __packed;

struct wire_trans_terminate_to_dev {
	struct wire_trans_hdr hdr;
	__le32 handle;
	__le32 padding;
} __packed;

struct wire_trans_terminate_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 padding;
} __packed;

struct wire_trans_status_to_dev {
	struct wire_trans_hdr hdr;
} __packed;

struct wire_trans_status_from_dev {
	struct wire_trans_hdr hdr;
	__le16 major;
	__le16 minor;
	__le32 status;
	__le64 status_flags;
} __packed;

struct wire_trans_validate_part_to_dev {
	struct wire_trans_hdr hdr;
	__le32 part_id;
	__le32 padding;
} __packed;

struct wire_trans_validate_part_from_dev {
	struct wire_trans_hdr hdr;
	__le32 status;
	__le32 padding;
} __packed;

struct xfer_queue_elem {
	/*
	 * Node in list of ongoing transfer requests on the control channel.
	 * Maintained by the root device struct.
	 */
	struct list_head list;
	/* Sequence number of this transfer request */
	u32 seq_num;
	/* Used to wait for completion of the transfer request */
	struct completion xfer_done;
	/* Received data from device */
	void *buf;
};

struct dma_xfer {
	/* Node in list of DMA transfers which is used for cleanup */
	struct list_head list;
	/* SG table of memory used for DMA */
	struct sg_table *sgt;
	/* Array of pages used for DMA */
	struct page **page_list;
	/* Number of pages used for DMA */
	unsigned long nr_pages;
};

struct ioctl_resources {
	/* List of all DMA transfers which is used later for cleanup */
	struct list_head dma_xfers;
	/* Base address of request queue which belongs to a DBC */
	void *buf;
	/*
	 * Base bus address of request queue which belongs to a DBC. Response
	 * queue base bus address can be calculated by adding size of request
	 * queue to base bus address of request queue.
	 */
	dma_addr_t dma_addr;
	/* Total size of request queue and response queue in bytes */
	u32 total_size;
	/* Total number of elements that can be queued in each of request and response queue */
	u32 nelem;
	/* Base address of response queue which belongs to a DBC */
	void *rsp_q_base;
	/* Status of the NNC message received */
	u32 status;
	/* DBC id of the DBC received from device */
	u32 dbc_id;
	/*
	 * DMA transfer request messages can be big in size and it may not be
	 * possible to send them in one shot. In such cases the messages are
	 * broken into chunks, this field stores ID of such chunks.
	 */
	u32 dma_chunk_id;
	/* Total number of bytes transferred for a DMA xfer request */
	u64 xferred_dma_size;
	/* Header of transaction message received from user. Used during DMA xfer request. */
	void *trans_hdr;
};

struct resp_work {
	struct work_struct work;
	struct qaic_device *qdev;
	void *buf;
};

/*
 * Since we're working with little endian messages, it's useful to be able to
 * increment without filling a whole line with conversions back and forth just
 * to add one (1) to a message count.
 */
static __le32 incr_le32(__le32 val)
{
	return cpu_to_le32(le32_to_cpu(val) + 1);
}

static u32 gen_crc(void *msg)
{
	struct wrapper_list *wrappers = msg;
	struct wrapper_msg *w;
	u32 crc = ~0;

	list_for_each_entry(w, &wrappers->list, list)
		crc = crc32(crc, &w->msg, w->len);

	return crc ^ ~0;
}

static u32 gen_crc_stub(void *msg)
{
	return 0;
}

static bool valid_crc(void *msg)
{
	struct wire_msg_hdr *hdr = msg;
	bool ret;
	u32 crc;

	/*
	 * The output of this algorithm is always converted to the native
	 * endianness.
	 */
	crc = le32_to_cpu(hdr->crc32);
	hdr->crc32 = 0;
	ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc;
	hdr->crc32 = cpu_to_le32(crc);
	return ret;
}

static bool valid_crc_stub(void *msg)
{
	return true;
}

static void free_wrapper(struct kref *ref)
{
	struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count);

	list_del(&wrapper->list);
	kfree(wrapper);
}

static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources,
			 struct qaic_user *usr)
{
	u32 dbc_id = resources->dbc_id;

	if (resources->buf) {
		wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use);
		qdev->dbc[dbc_id].req_q_base = resources->buf;
		qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base;
		qdev->dbc[dbc_id].dma_addr = resources->dma_addr;
		qdev->dbc[dbc_id].total_size = resources->total_size;
		qdev->dbc[dbc_id].nelem = resources->nelem;
		enable_dbc(qdev, dbc_id, usr);
		qdev->dbc[dbc_id].in_use = true;
		resources->buf = NULL;
	}
}

static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources)
{
	if (resources->buf)
		dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
				  resources->dma_addr);
	resources->buf = NULL;
}

static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources)
{
	struct dma_xfer *xfer;
	struct dma_xfer *x;
	int i;

	list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) {
		dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
		sg_free_table(xfer->sgt);
		kfree(xfer->sgt);
		for (i = 0; i < xfer->nr_pages; ++i)
			put_page(xfer->page_list[i]);
		kfree(xfer->page_list);
		list_del(&xfer->list);
		kfree(xfer);
	}
}

static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size)
{
	struct wrapper_msg *w = kzalloc(size, GFP_KERNEL);

	if (!w)
		return NULL;
	list_add_tail(&w->list, &wrappers->list);
	kref_init(&w->ref_count);
	w->head = wrappers;
	return w;
}

static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			      u32 *user_len)
{
	struct qaic_manage_trans_passthrough *in_trans = trans;
	struct wire_trans_passthrough *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 msg_hdr_len;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (in_trans->hdr.len % 8 != 0)
		return -EINVAL;

	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
		return -ENOSPC;

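	/*
	 * Each transaction gets its own wrapper so that it can later be
	 * queued to MHI as a separate buffer by msg_xfer().
	 */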
	trans_wrapper = add_wrapper(wrappers,
				    offsetof(struct wrapper_msg, trans) + in_trans->hdr.len);
	if (!trans_wrapper)
		return -ENOMEM;
	trans_wrapper->len = in_trans->hdr.len;
	out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans;

	memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr));
	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
	msg->hdr.count = incr_le32(msg->hdr.count);
	*user_len += in_trans->hdr.len;
	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);

	return 0;
}

/* returns error code for failure, 0 if enough pages alloc'd, 1 if dma_cont is needed */
static int find_and_map_user_pages(struct qaic_device *qdev,
				   struct qaic_manage_trans_dma_xfer *in_trans,
				   struct ioctl_resources *resources, struct dma_xfer *xfer)
{
	unsigned long need_pages;
	struct page **page_list;
	unsigned long nr_pages;
	struct sg_table *sgt;
	u64 xfer_start_addr;
	int ret;
	int i;

	xfer_start_addr = in_trans->addr + resources->xferred_dma_size;

	need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) -
				  resources->xferred_dma_size, PAGE_SIZE);

	nr_pages = need_pages;

	while (1) {
		page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
		if (!page_list) {
			nr_pages = nr_pages / 2;
			if (!nr_pages)
				return -ENOMEM;
		} else {
			break;
		}
	}

	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
	if (ret < 0 || ret != nr_pages) {
		ret = -EFAULT;
		goto free_page_list;
	}

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto put_pages;
	}

	ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
					offset_in_page(xfer_start_addr),
					in_trans->size - resources->xferred_dma_size, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		goto free_table;

	xfer->sgt = sgt;
	xfer->page_list = page_list;
	xfer->nr_pages = nr_pages;

	return need_pages > nr_pages ? 1 : 0;

free_table:
	sg_free_table(sgt);
free_sgt:
	kfree(sgt);
put_pages:
	for (i = 0; i < nr_pages; ++i)
		put_page(page_list[i]);
free_page_list:
	kfree(page_list);
	return ret;
}

/* returns error code for failure, 0 if everything was encoded, 1 if dma_cont is needed */
static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers,
				  struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size,
				  struct wire_trans_dma_xfer **out_trans)
{
	struct wrapper_msg *trans_wrapper;
	struct sg_table *sgt = xfer->sgt;
	struct wire_addr_size_pair *asp;
	struct scatterlist *sg;
	struct wrapper_msg *w;
	unsigned int dma_len;
	u64 dma_chunk_len;
	void *boundary;
	int nents_dma;
	int nents;
	int i;

	nents = sgt->nents;
	nents_dma = nents;
	*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
	for_each_sgtable_sg(sgt, sg, i) {
		*size -= sizeof(*asp);
		/* Save 1K for possible follow-up transactions. */
		if (*size < SZ_1K) {
			nents_dma = i;
			break;
		}
	}

	trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
	if (!trans_wrapper)
		return -ENOMEM;
	*out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans;

	asp = (*out_trans)->data;
	boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE;
	*size = 0;

	dma_len = 0;
	w = trans_wrapper;
	dma_chunk_len = 0;
	for_each_sg(sgt->sgl, sg, nents_dma, i) {
		asp->size = cpu_to_le64(dma_len);
		dma_chunk_len += dma_len;
		if (dma_len) {
			asp++;
			if ((void *)asp + sizeof(*asp) > boundary) {
				w->len = (void *)asp - (void *)&w->msg;
				*size += w->len;
				w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE);
				if (!w)
					return -ENOMEM;
				boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE;
				asp = (struct wire_addr_size_pair *)&w->msg;
			}
		}
		asp->addr = cpu_to_le64(sg_dma_address(sg));
		dma_len = sg_dma_len(sg);
	}
	/* finalize the last segment */
	asp->size = cpu_to_le64(dma_len);
	w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg;
	*size += w->len;
	dma_chunk_len += dma_len;
	resources->xferred_dma_size += dma_chunk_len;

	return nents_dma < nents ? 1 : 0;
}

static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer)
{
	int i;

	dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
	sg_free_table(xfer->sgt);
	kfree(xfer->sgt);
	for (i = 0; i < xfer->nr_pages; ++i)
		put_page(xfer->page_list[i]);
	kfree(xfer->page_list);
}

static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
		      u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
	struct qaic_manage_trans_dma_xfer *in_trans = trans;
	struct wire_trans_dma_xfer *out_trans;
	struct wrapper_msg *wrapper;
	struct dma_xfer *xfer;
	struct wire_msg *msg;
	bool need_cont_dma;
	u32 msg_hdr_len;
	u32 size;
	int ret;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	/* There should be enough space to hold at least one ASP entry. */
	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
	    QAIC_MANAGE_EXT_MSG_LENGTH)
		return -ENOMEM;

	if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size)
		return -EINVAL;

	xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
	if (!xfer)
		return -ENOMEM;

	ret = find_and_map_user_pages(qdev, in_trans, resources, xfer);
	if (ret < 0)
		goto free_xfer;

	need_cont_dma = (bool)ret;

	ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
	if (ret < 0)
		goto cleanup_xfer;

	need_cont_dma = need_cont_dma || (bool)ret;

	msg->hdr.len = cpu_to_le32(msg_hdr_len + size);
	msg->hdr.count = incr_le32(msg->hdr.count);

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(size);
	out_trans->tag = cpu_to_le32(in_trans->tag);
	out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) /
				       sizeof(struct wire_addr_size_pair));

	*user_len += in_trans->hdr.len;

	if (resources->dma_chunk_id) {
		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
	} else if (need_cont_dma) {
		while (resources->dma_chunk_id == 0)
			resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);

		out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id);
	}
	resources->trans_hdr = trans;

	list_add(&xfer->list, &resources->dma_xfers);
	return 0;

cleanup_xfer:
	cleanup_xfer(qdev, xfer);
free_xfer:
	kfree(xfer);
	return ret;
}

static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			   u32 *user_len, struct ioctl_resources *resources)
{
	struct qaic_manage_trans_activate_to_dev *in_trans = trans;
	struct wire_trans_activate_to_dev *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	dma_addr_t dma_addr;
	u32 msg_hdr_len;
	void *buf;
	u32 nelem;
	u32 size;
	int ret;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	if (!in_trans->queue_size)
		return -EINVAL;

	if (in_trans->pad)
		return -EINVAL;

	nelem = in_trans->queue_size;
	size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem;
	if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size())
		return -EINVAL;

	if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size)
		return -EINVAL;

	size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN);

	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	trans_wrapper = add_wrapper(wrappers,
				    offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));
	if (!trans_wrapper) {
		ret = -ENOMEM;
		goto free_dma;
	}
	trans_wrapper->len = sizeof(*out_trans);
	out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans;

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans));
	out_trans->buf_len = cpu_to_le32(size);
	out_trans->req_q_addr = cpu_to_le64(dma_addr);
	out_trans->req_q_size = cpu_to_le32(nelem);
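	/* The response queue occupies the tail of the same coherent buffer */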
	out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size());
	out_trans->rsp_q_size = cpu_to_le32(nelem);
	out_trans->options = cpu_to_le32(in_trans->options);

	*user_len += in_trans->hdr.len;
	msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans));
	msg->hdr.count = incr_le32(msg->hdr.count);

	resources->buf = buf;
	resources->dma_addr = dma_addr;
	resources->total_size = size;
	resources->nelem = nelem;
	resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size();
	return 0;

free_dma:
	dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
	return ret;
}

static int encode_deactivate(struct qaic_device *qdev, void *trans,
			     u32 *user_len, struct qaic_user *usr)
{
	struct qaic_manage_trans_deactivate *in_trans = trans;

	if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad)
		return -EINVAL;

	*user_len += in_trans->hdr.len;

	return disable_dbc(qdev, in_trans->dbc_id, usr);
}

static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers,
			 u32 *user_len)
{
	struct qaic_manage_trans_status_to_dev *in_trans = trans;
	struct wire_trans_status_to_dev *out_trans;
	struct wrapper_msg *trans_wrapper;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 msg_hdr_len;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;
	msg_hdr_len = le32_to_cpu(msg->hdr.len);

	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
	if (!trans_wrapper)
		return -ENOMEM;

	trans_wrapper->len = sizeof(*out_trans);
	out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans;

	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len);
	msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len);
	msg->hdr.count = incr_le32(msg->hdr.count);
	*user_len += in_trans->hdr.len;

	return 0;
}

static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
			  struct wrapper_list *wrappers, struct ioctl_resources *resources,
			  struct qaic_user *usr)
{
	struct qaic_manage_trans_hdr *trans_hdr;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	u32 user_len = 0;
	int ret;
	int i;

	if (!user_msg->count ||
	    user_msg->len < sizeof(*trans_hdr)) {
		ret = -EINVAL;
		goto out;
	}

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;

	msg->hdr.len = cpu_to_le32(sizeof(msg->hdr));

	if (resources->dma_chunk_id) {
		ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
		msg->hdr.count = cpu_to_le32(1);
		goto out;
	}

	for (i = 0; i < user_msg->count; ++i) {
		if (user_len > user_msg->len - sizeof(*trans_hdr)) {
			ret = -EINVAL;
			break;
		}
		trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
		if (trans_hdr->len < sizeof(trans_hdr) ||
		    size_add(user_len, trans_hdr->len) > user_msg->len) {
			ret = -EINVAL;
			break;
		}

		switch (trans_hdr->type) {
		case QAIC_TRANS_PASSTHROUGH_FROM_USR:
			ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
			break;
		case QAIC_TRANS_DMA_XFER_FROM_USR:
			ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_USR:
			ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_USR:
			ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_USR:
			ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
			break;
		default:
			ret = -EINVAL;
			break;
		}

		if (ret)
			break;
	}

	if (user_len != user_msg->len)
		ret = -EINVAL;
out:
	if (ret) {
		free_dma_xfers(qdev, resources);
		free_dbc_buf(qdev, resources);
		return ret;
	}

	return 0;
}

static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			      u32 *msg_len)
{
	struct qaic_manage_trans_passthrough *out_trans;
	struct wire_trans_passthrough *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (len % 8 != 0)
		return -EINVAL;

	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr));
	user_msg->len += len;
	*msg_len += len;
	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
	out_trans->hdr.len = len;

	return 0;
}

static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			   u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr)
{
	struct qaic_manage_trans_activate_from_dev *out_trans;
	struct wire_trans_activate_from_dev *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	user_msg->len += len;
	*msg_len += len;
	out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type);
	out_trans->hdr.len = len;
	out_trans->status = le32_to_cpu(in_trans->status);
	out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id);
	out_trans->options = le64_to_cpu(in_trans->options);

	if (!resources->buf)
		/* how did we get an activate response without a request? */
		return -EINVAL;

	if (out_trans->dbc_id >= qdev->num_dbc)
		/*
		 * The device assigned an invalid resource, which should never
		 * happen. Return an error so the user can try to recover.
		 */
		return -ENODEV;

	if (out_trans->status)
		/*
		 * Allocating resources failed on the device side. This is not
		 * expected behaviour, and the user is expected to handle this
		 * situation.
		 */
		return -ECANCELED;

	resources->status = out_trans->status;
	resources->dbc_id = out_trans->dbc_id;
	save_dbc_buf(qdev, resources, usr);

	return 0;
}

static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len,
			     struct qaic_user *usr)
{
	struct wire_trans_deactivate_from_dev *in_trans = trans;
	u32 dbc_id = le32_to_cpu(in_trans->dbc_id);
	u32 status = le32_to_cpu(in_trans->status);

	if (dbc_id >= qdev->num_dbc)
		/*
		 * The device assigned an invalid resource, which should never
		 * happen. Inject an error so the user can try to recover.
		 */
		return -ENODEV;

	if (status) {
		/*
		 * Releasing resources failed on the device side, which puts
		 * us in a bind since they may still be in use, so re-enable
		 * the dbc. The user is expected to retry deactivation.
		 */
		enable_dbc(qdev, dbc_id, usr);
		return -ECANCELED;
	}

	release_dbc(qdev, dbc_id);
	*msg_len += sizeof(*in_trans);

	return 0;
}

static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg,
			 u32 *user_len, struct wire_msg *msg)
{
	struct qaic_manage_trans_status_from_dev *out_trans;
	struct wire_trans_status_from_dev *in_trans = trans;
	u32 len;

	out_trans = (void *)user_msg->data + user_msg->len;

	len = le32_to_cpu(in_trans->hdr.len);
	if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -ENOSPC;

	out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV;
	out_trans->hdr.len = len;
	out_trans->major = le16_to_cpu(in_trans->major);
	out_trans->minor = le16_to_cpu(in_trans->minor);
	out_trans->status_flags = le64_to_cpu(in_trans->status_flags);
	out_trans->status = le32_to_cpu(in_trans->status);
	*user_len += le32_to_cpu(in_trans->hdr.len);
	user_msg->len += len;

	if (out_trans->status)
		return -ECANCELED;
	if (out_trans->status_flags & BIT(0) && !valid_crc(msg))
		return -EPIPE;

	return 0;
}

static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
			  struct wire_msg *msg, struct ioctl_resources *resources,
			  struct qaic_user *usr)
{
	u32 msg_hdr_len = le32_to_cpu(msg->hdr.len);
	struct wire_trans_hdr *trans_hdr;
	u32 msg_len = 0;
	int ret;
	int i;

	if (msg_hdr_len < sizeof(*trans_hdr) ||
	    msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	user_msg->len = 0;
	user_msg->count = le32_to_cpu(msg->hdr.count);

	for (i = 0; i < user_msg->count; ++i) {
		u32 hdr_len;

		if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
			return -EINVAL;

		trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
		hdr_len = le32_to_cpu(trans_hdr->len);
		if (hdr_len < sizeof(*trans_hdr) ||
		    size_add(msg_len, hdr_len) > msg_hdr_len)
			return -EINVAL;

		switch (le32_to_cpu(trans_hdr->type)) {
		case QAIC_TRANS_PASSTHROUGH_FROM_DEV:
			ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_DEV:
			ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_DEV:
			ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_DEV:
			ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
			break;
		default:
			return -EINVAL;
		}

		if (ret)
			return ret;
	}

	if (msg_len != (msg_hdr_len - sizeof(msg->hdr)))
		return -EINVAL;

	return 0;
}

static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num,
		      bool ignore_signal)
{
	struct xfer_queue_elem elem;
	struct wire_msg *out_buf;
	struct wrapper_msg *w;
	long ret = -EAGAIN;
	int xfer_count = 0;
	int retry_count;

	if (qdev->in_reset) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(-ENODEV);
	}

	/* Attempt to avoid a partial commit of a message */
	list_for_each_entry(w, &wrappers->list, list)
		xfer_count++;

	for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
		if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
			ret = 0;
			break;
		}
		msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
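		/* Give up early if the sleep above was cut short by a signal */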
		if (signal_pending(current))
			break;
	}

	if (ret) {
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(ret);
	}

	elem.seq_num = seq_num;
	elem.buf = NULL;
	init_completion(&elem.xfer_done);
	if (likely(!qdev->cntl_lost_buf)) {
		/*
		 * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH.
		 * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH.
		 */
		out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL);
		if (!out_buf) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(-ENOMEM);
		}

		ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
				    QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
		if (ret) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	} else {
		/*
		 * We lost a buffer because we queued a recv buf, but then
		 * queuing the corresponding tx buf failed. To try to avoid
		 * a memory leak, let's reclaim it and use it for this
		 * transaction.
		 */
		qdev->cntl_lost_buf = false;
	}

	list_for_each_entry(w, &wrappers->list, list) {
		kref_get(&w->ref_count);
		retry_count = 0;
		ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
				    list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
		if (ret) {
			qdev->cntl_lost_buf = true;
			kref_put(&w->ref_count, free_wrapper);
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
		}
	}

	list_add_tail(&elem.list, &qdev->cntl_xfer_list);
	mutex_unlock(&qdev->cntl_mutex);

	if (ignore_signal)
		ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
	else
		ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
								control_resp_timeout_s * HZ);
	/*
	 * Not using the _interruptible variant here because we have to clean
	 * up or we'll likely cause memory corruption.
	 */
	mutex_lock(&qdev->cntl_mutex);
	if (!list_empty(&elem.list))
		list_del(&elem.list);
	if (!ret && !elem.buf)
		ret = -ETIMEDOUT;
	else if (ret > 0 && !elem.buf)
		ret = -EIO;
	mutex_unlock(&qdev->cntl_mutex);

	if (ret < 0) {
		kfree(elem.buf);
		return ERR_PTR(ret);
	} else if (!qdev->valid_crc(elem.buf)) {
		kfree(elem.buf);
		return ERR_PTR(-EPIPE);
	}

	return elem.buf;
}

/* Add a transaction to abort the outstanding DMA continuation */
static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id)
{
	struct wire_trans_dma_xfer *out_trans;
	u32 size = sizeof(*out_trans);
	struct wrapper_msg *wrapper;
	struct wrapper_msg *w;
	struct wire_msg *msg;

	wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list);
	msg = &wrapper->msg;

	/* Remove all but the first wrapper which has the msg header */
	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
		if (!list_is_first(&wrapper->list, &wrappers->list))
			kref_put(&wrapper->ref_count, free_wrapper);

	wrapper = add_wrapper(wrappers, offsetof(struct wrapper_msg, trans) + sizeof(*out_trans));

	if (!wrapper)
		return -ENOMEM;

	out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans;
	out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV);
	out_trans->hdr.len = cpu_to_le32(size);
	out_trans->tag = cpu_to_le32(0);
	out_trans->count = cpu_to_le32(0);
	out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id);

	msg->hdr.len = cpu_to_le32(size + sizeof(*msg));
	msg->hdr.count = cpu_to_le32(1);
	wrapper->len = size;

	return 0;
}

static struct wrapper_list *alloc_wrapper_list(void)
{
	struct wrapper_list *wrappers;

	wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
	if (!wrappers)
		return NULL;
	INIT_LIST_HEAD(&wrappers->list);
	spin_lock_init(&wrappers->lock);

	return wrappers;
}

static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr,
				struct manage_msg *user_msg, struct ioctl_resources *resources,
				struct wire_msg **rsp)
{
	struct wrapper_list *wrappers;
	struct wrapper_msg *wrapper;
	struct wrapper_msg *w;
	bool all_done = false;
	struct wire_msg *msg;
	int ret;

	wrappers = alloc_wrapper_list();
	if (!wrappers)
		return -ENOMEM;

	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
	if (!wrapper) {
		kfree(wrappers);
		return -ENOMEM;
	}

	msg = &wrapper->msg;
	wrapper->len = sizeof(*msg);

	ret = encode_message(qdev, user_msg, wrappers, resources, usr);
	if (ret && resources->dma_chunk_id)
		ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
	if (ret)
		goto encode_failed;

	ret = mutex_lock_interruptible(&qdev->cntl_mutex);
	if (ret)
		goto lock_failed;

	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);

	if (usr) {
		msg->hdr.handle = cpu_to_le32(usr->handle);
		msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id);
	} else {
		msg->hdr.handle = 0;
		msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION);
	}

	msg->hdr.padding = cpu_to_le32(0);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/* msg_xfer releases the mutex */
	*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
	if (IS_ERR(*rsp))
		ret = PTR_ERR(*rsp);

lock_failed:
	free_dma_xfers(qdev, resources);
encode_failed:
	spin_lock(&wrappers->lock);
	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
		kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);
	if (all_done)
		kfree(wrappers);

	return ret;
}

static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg)
{
	struct wire_trans_dma_xfer_cont *dma_cont = NULL;
	struct ioctl_resources resources;
	struct wire_msg *rsp = NULL;
	int ret;

	memset(&resources, 0, sizeof(struct ioctl_resources));

	INIT_LIST_HEAD(&resources.dma_xfers);

	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH ||
	    user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr))
		return -EINVAL;

dma_xfer_continue:
	ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
	if (ret)
		return ret;
	/* dma_cont should be the only transaction if present */
	if (le32_to_cpu(rsp->hdr.count) == 1) {
		dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data;
		if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT)
			dma_cont = NULL;
	}
	if (dma_cont) {
		if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id &&
		    le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) {
			kfree(rsp);
			goto dma_xfer_continue;
		}

		ret = -EINVAL;
		goto dma_cont_failed;
	}

	ret = decode_message(qdev, user_msg, rsp, &resources, usr);

dma_cont_failed:
	free_dbc_buf(qdev, &resources);
	kfree(rsp);
	return ret;
}

int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_manage_msg *user_msg = data;
	struct qaic_device *qdev;
	struct manage_msg *msg;
	struct qaic_user *usr;
	u8 __user *user_data;
	int qdev_rcu_id;
	int usr_rcu_id;
	int ret;

	if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
		return -EINVAL;

	usr = file_priv->driver_priv;

	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
		return -ENODEV;
	}

	qdev = usr->qddev->qdev;

	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->in_reset) {
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
		return -ENODEV;
	}

	msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
	if (!msg) {
		ret = -ENOMEM;
		goto out;
	}

	msg->len = user_msg->len;
	msg->count = user_msg->count;

	user_data = u64_to_user_ptr(user_msg->data);

	if (copy_from_user(msg->data, user_data, user_msg->len)) {
		ret = -EFAULT;
		goto free_msg;
	}

	ret = qaic_manage(qdev, usr, msg);

	/*
	 * If qaic_manage() is successful then we copy the message back to
	 * userspace memory, with an exception for -ECANCELED. -ECANCELED
	 * means that the device has NACKed the message with a status error
	 * code which userspace would like to know.
	 */
	if (ret == -ECANCELED || !ret) {
		if (copy_to_user(user_data, msg->data, msg->len)) {
			ret = -EFAULT;
		} else {
			user_msg->len = msg->len;
			user_msg->count = msg->count;
		}
	}

free_msg:
	kfree(msg);
out:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor)
{
	struct qaic_manage_trans_status_from_dev *status_result;
	struct qaic_manage_trans_status_to_dev *status_query;
	struct manage_msg *user_msg;
	int ret;

	user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
	if (!user_msg) {
		ret = -ENOMEM;
		goto out;
	}
	user_msg->len = sizeof(*status_query);
	user_msg->count = 1;

	status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data;
	status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR;
	status_query->hdr.len = sizeof(status_query->hdr);

	ret = qaic_manage(qdev, usr, user_msg);
	if (ret)
		goto kfree_user_msg;
	status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data;
	*major = status_result->major;
	*minor = status_result->minor;

	if (status_result->status_flags & BIT(0)) { /* device is using CRC */
		/* By default qdev->gen_crc is programmed to generate CRC */
		qdev->valid_crc = valid_crc;
	} else {
		/* By default qdev->valid_crc is programmed to bypass CRC */
		qdev->gen_crc = gen_crc_stub;
	}

kfree_user_msg:
	kfree(user_msg);
out:
	return ret;
}

static void resp_worker(struct work_struct *work)
{
	struct resp_work *resp = container_of(work, struct resp_work, work);
	struct qaic_device *qdev = resp->qdev;
	struct wire_msg *msg = resp->buf;
	struct xfer_queue_elem *elem;
	struct xfer_queue_elem *i;
	bool found = false;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) {
			found = true;
			list_del_init(&elem->list);
			elem->buf = msg;
			complete_all(&elem->xfer_done);
			break;
		}
	}
	mutex_unlock(&qdev->cntl_mutex);

	if (!found)
		/* request must have timed out, drop packet */
		kfree(msg);

	kfree(resp);
}

static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper)
{
	bool all_done = false;

	spin_lock(&wrappers->lock);
	kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);

	if (all_done)
		kfree(wrappers);
}

void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct wire_msg *msg = mhi_result->buf_addr;
	struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg);

	free_wrapper_from_list(wrapper->head, wrapper);
}

void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
	struct wire_msg *msg = mhi_result->buf_addr;
	struct resp_work *resp;

	if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) {
		kfree(msg);
		return;
	}

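	/*
	 * This callback may run in atomic context, so allocate with
	 * GFP_ATOMIC and defer the sequence-number matching to the control
	 * workqueue.
	 */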
	resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(msg);
		return;
	}

	INIT_WORK(&resp->work, resp_worker);
	resp->qdev = qdev;
	resp->buf = msg;
	queue_work(qdev->cntl_wq, &resp->work);
}

int qaic_control_open(struct qaic_device *qdev)
{
	if (!qdev->cntl_ch)
		return -ENODEV;

	qdev->cntl_lost_buf = false;
	/*
	 * By default qaic should assume that the device has CRC enabled.
	 * Qaic learns whether the device actually has CRC enabled or
	 * disabled during the device status transaction, which is the first
	 * transaction performed on the control channel.
	 *
	 * So CRC validation of the first device status response is skipped
	 * (by calling valid_crc_stub) and is done later during decoding if
	 * the device has CRC enabled.
	 * Once qaic knows whether the device has CRC enabled or not, it acts
	 * accordingly.
	 */
	qdev->gen_crc = gen_crc;
	qdev->valid_crc = valid_crc_stub;

	return mhi_prepare_for_transfer(qdev->cntl_ch);
}

void qaic_control_close(struct qaic_device *qdev)
{
	mhi_unprepare_from_transfer(qdev->cntl_ch);
}

void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr)
{
	struct wire_trans_terminate_to_dev *trans;
	struct wrapper_list *wrappers;
	struct wrapper_msg *wrapper;
	struct wire_msg *msg;
	struct wire_msg *rsp;

	wrappers = alloc_wrapper_list();
	if (!wrappers)
		return;

	wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
	if (!wrapper)
		return;

	msg = &wrapper->msg;

	trans = (struct wire_trans_terminate_to_dev *)msg->data;

	trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV);
	trans->hdr.len = cpu_to_le32(sizeof(*trans));
	trans->handle = cpu_to_le32(usr->handle);

	mutex_lock(&qdev->cntl_mutex);
	wrapper->len = sizeof(msg->hdr) + sizeof(*trans);
	msg->hdr.magic_number = MANAGE_MAGIC_NUMBER;
	msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++);
	msg->hdr.len = cpu_to_le32(wrapper->len);
	msg->hdr.count = cpu_to_le32(1);
	msg->hdr.handle = cpu_to_le32(usr->handle);
	msg->hdr.padding = cpu_to_le32(0);
	msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers));

	/*
	 * msg_xfer releases the mutex.
	 * We don't care about the return of msg_xfer since we will not do
	 * anything different based on what happens.
	 * We ignore pending signals since one will be set if the user is
	 * killed, and we need to give the device a chance to clean up,
	 * otherwise DMA may still be in progress when we return.
	 */
	rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
	if (!IS_ERR(rsp))
		kfree(rsp);
	free_wrapper_from_list(wrappers, wrapper);
}

void wake_all_cntl(struct qaic_device *qdev)
{
	struct xfer_queue_elem *elem;
	struct xfer_queue_elem *i;

	mutex_lock(&qdev->cntl_mutex);
	list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) {
		list_del_init(&elem->list);
		complete_all(&elem->xfer_done);
	}
	mutex_unlock(&qdev->cntl_mutex);
}