1 /* 2 * Copyright (C) 2017 Broadcom 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License as 6 * published by the Free Software Foundation version 2. 7 * 8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any 9 * kind, whether express or implied; without even the implied warranty 10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * GNU General Public License for more details. 12 */ 13 14 /* 15 * Broadcom SBA RAID Driver 16 * 17 * The Broadcom stream buffer accelerator (SBA) provides offloading 18 * capabilities for RAID operations. The SBA offload engine is accessible 19 * via Broadcom SoC specific ring manager. Two or more offload engines 20 * can share same Broadcom SoC specific ring manager due to this Broadcom 21 * SoC specific ring manager driver is implemented as a mailbox controller 22 * driver and offload engine drivers are implemented as mallbox clients. 23 * 24 * Typically, Broadcom SoC specific ring manager will implement larger 25 * number of hardware rings over one or more SBA hardware devices. By 26 * design, the internal buffer size of SBA hardware device is limited 27 * but all offload operations supported by SBA can be broken down into 28 * multiple small size requests and executed parallely on multiple SBA 29 * hardware devices for achieving high through-put. 30 * 31 * The Broadcom SBA RAID driver does not require any register programming 32 * except submitting request to SBA hardware device via mailbox channels. 33 * This driver implements a DMA device with one DMA channel using a single 34 * mailbox channel provided by Broadcom SoC specific ring manager driver. 35 * For having more SBA DMA channels, we can create more SBA device nodes 36 * in Broadcom SoC specific DTS based on number of hardware rings supported 37 * by Broadcom SoC ring manager. 38 */ 39 40 #include <linux/bitops.h> 41 #include <linux/debugfs.h> 42 #include <linux/dma-mapping.h> 43 #include <linux/dmaengine.h> 44 #include <linux/list.h> 45 #include <linux/mailbox_client.h> 46 #include <linux/mailbox/brcm-message.h> 47 #include <linux/module.h> 48 #include <linux/of_device.h> 49 #include <linux/slab.h> 50 #include <linux/raid/pq.h> 51 52 #include "dmaengine.h" 53 54 /* ====== Driver macros and defines ===== */ 55 56 #define SBA_TYPE_SHIFT 48 57 #define SBA_TYPE_MASK GENMASK(1, 0) 58 #define SBA_TYPE_A 0x0 59 #define SBA_TYPE_B 0x2 60 #define SBA_TYPE_C 0x3 61 #define SBA_USER_DEF_SHIFT 32 62 #define SBA_USER_DEF_MASK GENMASK(15, 0) 63 #define SBA_R_MDATA_SHIFT 24 64 #define SBA_R_MDATA_MASK GENMASK(7, 0) 65 #define SBA_C_MDATA_MS_SHIFT 18 66 #define SBA_C_MDATA_MS_MASK GENMASK(1, 0) 67 #define SBA_INT_SHIFT 17 68 #define SBA_INT_MASK BIT(0) 69 #define SBA_RESP_SHIFT 16 70 #define SBA_RESP_MASK BIT(0) 71 #define SBA_C_MDATA_SHIFT 8 72 #define SBA_C_MDATA_MASK GENMASK(7, 0) 73 #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum)) 74 #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0) 75 #define SBA_C_MDATA_DNUM_SHIFT 5 76 #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0) 77 #define SBA_C_MDATA_LS(__v) ((__v) & 0xff) 78 #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3) 79 #define SBA_CMD_SHIFT 0 80 #define SBA_CMD_MASK GENMASK(3, 0) 81 #define SBA_CMD_ZERO_BUFFER 0x4 82 #define SBA_CMD_ZERO_ALL_BUFFERS 0x8 83 #define SBA_CMD_LOAD_BUFFER 0x9 84 #define SBA_CMD_XOR 0xa 85 #define SBA_CMD_GALOIS_XOR 0xb 86 #define SBA_CMD_WRITE_BUFFER 0xc 87 #define SBA_CMD_GALOIS 0xe 88 89 #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 90 #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8 91 92 /* Driver helper macros */ 93 #define to_sba_request(tx) \ 94 container_of(tx, struct sba_request, tx) 95 #define to_sba_device(dchan) \ 96 container_of(dchan, struct sba_device, dma_chan) 97 98 /* ===== Driver data structures ===== */ 99 100 enum sba_request_flags { 101 SBA_REQUEST_STATE_FREE = 0x001, 102 SBA_REQUEST_STATE_ALLOCED = 0x002, 103 SBA_REQUEST_STATE_PENDING = 0x004, 104 SBA_REQUEST_STATE_ACTIVE = 0x008, 105 SBA_REQUEST_STATE_ABORTED = 0x010, 106 SBA_REQUEST_STATE_MASK = 0x0ff, 107 SBA_REQUEST_FENCE = 0x100, 108 }; 109 110 struct sba_request { 111 /* Global state */ 112 struct list_head node; 113 struct sba_device *sba; 114 u32 flags; 115 /* Chained requests management */ 116 struct sba_request *first; 117 struct list_head next; 118 atomic_t next_pending_count; 119 /* BRCM message data */ 120 struct brcm_message msg; 121 struct dma_async_tx_descriptor tx; 122 /* SBA commands */ 123 struct brcm_sba_command cmds[0]; 124 }; 125 126 enum sba_version { 127 SBA_VER_1 = 0, 128 SBA_VER_2 129 }; 130 131 struct sba_device { 132 /* Underlying device */ 133 struct device *dev; 134 /* DT configuration parameters */ 135 enum sba_version ver; 136 /* Derived configuration parameters */ 137 u32 max_req; 138 u32 hw_buf_size; 139 u32 hw_resp_size; 140 u32 max_pq_coefs; 141 u32 max_pq_srcs; 142 u32 max_cmd_per_req; 143 u32 max_xor_srcs; 144 u32 max_resp_pool_size; 145 u32 max_cmds_pool_size; 146 /* Maibox client and Mailbox channels */ 147 struct mbox_client client; 148 struct mbox_chan *mchan; 149 struct device *mbox_dev; 150 /* DMA device and DMA channel */ 151 struct dma_device dma_dev; 152 struct dma_chan dma_chan; 153 /* DMA channel resources */ 154 void *resp_base; 155 dma_addr_t resp_dma_base; 156 void *cmds_base; 157 dma_addr_t cmds_dma_base; 158 spinlock_t reqs_lock; 159 bool reqs_fence; 160 struct list_head reqs_alloc_list; 161 struct list_head reqs_pending_list; 162 struct list_head reqs_active_list; 163 struct list_head reqs_aborted_list; 164 struct list_head reqs_free_list; 165 /* DebugFS directory entries */ 166 struct dentry *root; 167 struct dentry *stats; 168 }; 169 170 /* ====== Command helper routines ===== */ 171 172 static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask) 173 { 174 cmd &= ~((u64)mask << shift); 175 cmd |= ((u64)(val & mask) << shift); 176 return cmd; 177 } 178 179 static inline u32 __pure sba_cmd_load_c_mdata(u32 b0) 180 { 181 return b0 & SBA_C_MDATA_BNUMx_MASK; 182 } 183 184 static inline u32 __pure sba_cmd_write_c_mdata(u32 b0) 185 { 186 return b0 & SBA_C_MDATA_BNUMx_MASK; 187 } 188 189 static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0) 190 { 191 return (b0 & SBA_C_MDATA_BNUMx_MASK) | 192 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)); 193 } 194 195 static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0) 196 { 197 return (b0 & SBA_C_MDATA_BNUMx_MASK) | 198 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) | 199 ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT); 200 } 201 202 /* ====== General helper routines ===== */ 203 204 static struct sba_request *sba_alloc_request(struct sba_device *sba) 205 { 206 bool found = false; 207 unsigned long flags; 208 struct sba_request *req = NULL; 209 210 spin_lock_irqsave(&sba->reqs_lock, flags); 211 list_for_each_entry(req, &sba->reqs_free_list, node) { 212 if (async_tx_test_ack(&req->tx)) { 213 list_move_tail(&req->node, &sba->reqs_alloc_list); 214 found = true; 215 break; 216 } 217 } 218 spin_unlock_irqrestore(&sba->reqs_lock, flags); 219 220 if (!found) { 221 /* 222 * We have no more free requests so, we peek 223 * mailbox channels hoping few active requests 224 * would have completed which will create more 225 * room for new requests. 226 */ 227 mbox_client_peek_data(sba->mchan); 228 return NULL; 229 } 230 231 req->flags = SBA_REQUEST_STATE_ALLOCED; 232 req->first = req; 233 INIT_LIST_HEAD(&req->next); 234 atomic_set(&req->next_pending_count, 1); 235 236 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); 237 async_tx_ack(&req->tx); 238 239 return req; 240 } 241 242 /* Note: Must be called with sba->reqs_lock held */ 243 static void _sba_pending_request(struct sba_device *sba, 244 struct sba_request *req) 245 { 246 lockdep_assert_held(&sba->reqs_lock); 247 req->flags &= ~SBA_REQUEST_STATE_MASK; 248 req->flags |= SBA_REQUEST_STATE_PENDING; 249 list_move_tail(&req->node, &sba->reqs_pending_list); 250 if (list_empty(&sba->reqs_active_list)) 251 sba->reqs_fence = false; 252 } 253 254 /* Note: Must be called with sba->reqs_lock held */ 255 static bool _sba_active_request(struct sba_device *sba, 256 struct sba_request *req) 257 { 258 lockdep_assert_held(&sba->reqs_lock); 259 if (list_empty(&sba->reqs_active_list)) 260 sba->reqs_fence = false; 261 if (sba->reqs_fence) 262 return false; 263 req->flags &= ~SBA_REQUEST_STATE_MASK; 264 req->flags |= SBA_REQUEST_STATE_ACTIVE; 265 list_move_tail(&req->node, &sba->reqs_active_list); 266 if (req->flags & SBA_REQUEST_FENCE) 267 sba->reqs_fence = true; 268 return true; 269 } 270 271 /* Note: Must be called with sba->reqs_lock held */ 272 static void _sba_abort_request(struct sba_device *sba, 273 struct sba_request *req) 274 { 275 lockdep_assert_held(&sba->reqs_lock); 276 req->flags &= ~SBA_REQUEST_STATE_MASK; 277 req->flags |= SBA_REQUEST_STATE_ABORTED; 278 list_move_tail(&req->node, &sba->reqs_aborted_list); 279 if (list_empty(&sba->reqs_active_list)) 280 sba->reqs_fence = false; 281 } 282 283 /* Note: Must be called with sba->reqs_lock held */ 284 static void _sba_free_request(struct sba_device *sba, 285 struct sba_request *req) 286 { 287 lockdep_assert_held(&sba->reqs_lock); 288 req->flags &= ~SBA_REQUEST_STATE_MASK; 289 req->flags |= SBA_REQUEST_STATE_FREE; 290 list_move_tail(&req->node, &sba->reqs_free_list); 291 if (list_empty(&sba->reqs_active_list)) 292 sba->reqs_fence = false; 293 } 294 295 static void sba_free_chained_requests(struct sba_request *req) 296 { 297 unsigned long flags; 298 struct sba_request *nreq; 299 struct sba_device *sba = req->sba; 300 301 spin_lock_irqsave(&sba->reqs_lock, flags); 302 303 _sba_free_request(sba, req); 304 list_for_each_entry(nreq, &req->next, next) 305 _sba_free_request(sba, nreq); 306 307 spin_unlock_irqrestore(&sba->reqs_lock, flags); 308 } 309 310 static void sba_chain_request(struct sba_request *first, 311 struct sba_request *req) 312 { 313 unsigned long flags; 314 struct sba_device *sba = req->sba; 315 316 spin_lock_irqsave(&sba->reqs_lock, flags); 317 318 list_add_tail(&req->next, &first->next); 319 req->first = first; 320 atomic_inc(&first->next_pending_count); 321 322 spin_unlock_irqrestore(&sba->reqs_lock, flags); 323 } 324 325 static void sba_cleanup_nonpending_requests(struct sba_device *sba) 326 { 327 unsigned long flags; 328 struct sba_request *req, *req1; 329 330 spin_lock_irqsave(&sba->reqs_lock, flags); 331 332 /* Freeup all alloced request */ 333 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) 334 _sba_free_request(sba, req); 335 336 /* Set all active requests as aborted */ 337 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) 338 _sba_abort_request(sba, req); 339 340 /* 341 * Note: We expect that aborted request will be eventually 342 * freed by sba_receive_message() 343 */ 344 345 spin_unlock_irqrestore(&sba->reqs_lock, flags); 346 } 347 348 static void sba_cleanup_pending_requests(struct sba_device *sba) 349 { 350 unsigned long flags; 351 struct sba_request *req, *req1; 352 353 spin_lock_irqsave(&sba->reqs_lock, flags); 354 355 /* Freeup all pending request */ 356 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) 357 _sba_free_request(sba, req); 358 359 spin_unlock_irqrestore(&sba->reqs_lock, flags); 360 } 361 362 static int sba_send_mbox_request(struct sba_device *sba, 363 struct sba_request *req) 364 { 365 int ret = 0; 366 367 /* Send message for the request */ 368 req->msg.error = 0; 369 ret = mbox_send_message(sba->mchan, &req->msg); 370 if (ret < 0) { 371 dev_err(sba->dev, "send message failed with error %d", ret); 372 return ret; 373 } 374 375 /* Check error returned by mailbox controller */ 376 ret = req->msg.error; 377 if (ret < 0) { 378 dev_err(sba->dev, "message error %d", ret); 379 } 380 381 /* Signal txdone for mailbox channel */ 382 mbox_client_txdone(sba->mchan, ret); 383 384 return ret; 385 } 386 387 /* Note: Must be called with sba->reqs_lock held */ 388 static void _sba_process_pending_requests(struct sba_device *sba) 389 { 390 int ret; 391 u32 count; 392 struct sba_request *req; 393 394 /* Process few pending requests */ 395 count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL; 396 while (!list_empty(&sba->reqs_pending_list) && count) { 397 /* Get the first pending request */ 398 req = list_first_entry(&sba->reqs_pending_list, 399 struct sba_request, node); 400 401 /* Try to make request active */ 402 if (!_sba_active_request(sba, req)) 403 break; 404 405 /* Send request to mailbox channel */ 406 ret = sba_send_mbox_request(sba, req); 407 if (ret < 0) { 408 _sba_pending_request(sba, req); 409 break; 410 } 411 412 count--; 413 } 414 } 415 416 static void sba_process_received_request(struct sba_device *sba, 417 struct sba_request *req) 418 { 419 unsigned long flags; 420 struct dma_async_tx_descriptor *tx; 421 struct sba_request *nreq, *first = req->first; 422 423 /* Process only after all chained requests are received */ 424 if (!atomic_dec_return(&first->next_pending_count)) { 425 tx = &first->tx; 426 427 WARN_ON(tx->cookie < 0); 428 if (tx->cookie > 0) { 429 spin_lock_irqsave(&sba->reqs_lock, flags); 430 dma_cookie_complete(tx); 431 spin_unlock_irqrestore(&sba->reqs_lock, flags); 432 dmaengine_desc_get_callback_invoke(tx, NULL); 433 dma_descriptor_unmap(tx); 434 tx->callback = NULL; 435 tx->callback_result = NULL; 436 } 437 438 dma_run_dependencies(tx); 439 440 spin_lock_irqsave(&sba->reqs_lock, flags); 441 442 /* Free all requests chained to first request */ 443 list_for_each_entry(nreq, &first->next, next) 444 _sba_free_request(sba, nreq); 445 INIT_LIST_HEAD(&first->next); 446 447 /* Free the first request */ 448 _sba_free_request(sba, first); 449 450 /* Process pending requests */ 451 _sba_process_pending_requests(sba); 452 453 spin_unlock_irqrestore(&sba->reqs_lock, flags); 454 } 455 } 456 457 static void sba_write_stats_in_seqfile(struct sba_device *sba, 458 struct seq_file *file) 459 { 460 unsigned long flags; 461 struct sba_request *req; 462 u32 free_count = 0, alloced_count = 0; 463 u32 pending_count = 0, active_count = 0, aborted_count = 0; 464 465 spin_lock_irqsave(&sba->reqs_lock, flags); 466 467 list_for_each_entry(req, &sba->reqs_free_list, node) 468 if (async_tx_test_ack(&req->tx)) 469 free_count++; 470 471 list_for_each_entry(req, &sba->reqs_alloc_list, node) 472 alloced_count++; 473 474 list_for_each_entry(req, &sba->reqs_pending_list, node) 475 pending_count++; 476 477 list_for_each_entry(req, &sba->reqs_active_list, node) 478 active_count++; 479 480 list_for_each_entry(req, &sba->reqs_aborted_list, node) 481 aborted_count++; 482 483 spin_unlock_irqrestore(&sba->reqs_lock, flags); 484 485 seq_printf(file, "maximum requests = %d\n", sba->max_req); 486 seq_printf(file, "free requests = %d\n", free_count); 487 seq_printf(file, "alloced requests = %d\n", alloced_count); 488 seq_printf(file, "pending requests = %d\n", pending_count); 489 seq_printf(file, "active requests = %d\n", active_count); 490 seq_printf(file, "aborted requests = %d\n", aborted_count); 491 } 492 493 /* ====== DMAENGINE callbacks ===== */ 494 495 static void sba_free_chan_resources(struct dma_chan *dchan) 496 { 497 /* 498 * Channel resources are pre-alloced so we just free-up 499 * whatever we can so that we can re-use pre-alloced 500 * channel resources next time. 501 */ 502 sba_cleanup_nonpending_requests(to_sba_device(dchan)); 503 } 504 505 static int sba_device_terminate_all(struct dma_chan *dchan) 506 { 507 /* Cleanup all pending requests */ 508 sba_cleanup_pending_requests(to_sba_device(dchan)); 509 510 return 0; 511 } 512 513 static void sba_issue_pending(struct dma_chan *dchan) 514 { 515 unsigned long flags; 516 struct sba_device *sba = to_sba_device(dchan); 517 518 /* Process pending requests */ 519 spin_lock_irqsave(&sba->reqs_lock, flags); 520 _sba_process_pending_requests(sba); 521 spin_unlock_irqrestore(&sba->reqs_lock, flags); 522 } 523 524 static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx) 525 { 526 unsigned long flags; 527 dma_cookie_t cookie; 528 struct sba_device *sba; 529 struct sba_request *req, *nreq; 530 531 if (unlikely(!tx)) 532 return -EINVAL; 533 534 sba = to_sba_device(tx->chan); 535 req = to_sba_request(tx); 536 537 /* Assign cookie and mark all chained requests pending */ 538 spin_lock_irqsave(&sba->reqs_lock, flags); 539 cookie = dma_cookie_assign(tx); 540 _sba_pending_request(sba, req); 541 list_for_each_entry(nreq, &req->next, next) 542 _sba_pending_request(sba, nreq); 543 spin_unlock_irqrestore(&sba->reqs_lock, flags); 544 545 return cookie; 546 } 547 548 static enum dma_status sba_tx_status(struct dma_chan *dchan, 549 dma_cookie_t cookie, 550 struct dma_tx_state *txstate) 551 { 552 enum dma_status ret; 553 struct sba_device *sba = to_sba_device(dchan); 554 555 ret = dma_cookie_status(dchan, cookie, txstate); 556 if (ret == DMA_COMPLETE) 557 return ret; 558 559 mbox_client_peek_data(sba->mchan); 560 561 return dma_cookie_status(dchan, cookie, txstate); 562 } 563 564 static void sba_fillup_interrupt_msg(struct sba_request *req, 565 struct brcm_sba_command *cmds, 566 struct brcm_message *msg) 567 { 568 u64 cmd; 569 u32 c_mdata; 570 dma_addr_t resp_dma = req->tx.phys; 571 struct brcm_sba_command *cmdsp = cmds; 572 573 /* Type-B command to load dummy data into buf0 */ 574 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 575 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 576 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, 577 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 578 c_mdata = sba_cmd_load_c_mdata(0); 579 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 580 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 581 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, 582 SBA_CMD_SHIFT, SBA_CMD_MASK); 583 cmdsp->cmd = cmd; 584 *cmdsp->cmd_dma = cpu_to_le64(cmd); 585 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 586 cmdsp->data = resp_dma; 587 cmdsp->data_len = req->sba->hw_resp_size; 588 cmdsp++; 589 590 /* Type-A command to write buf0 to dummy location */ 591 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 592 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 593 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, 594 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 595 cmd = sba_cmd_enc(cmd, 0x1, 596 SBA_RESP_SHIFT, SBA_RESP_MASK); 597 c_mdata = sba_cmd_write_c_mdata(0); 598 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 599 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 600 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, 601 SBA_CMD_SHIFT, SBA_CMD_MASK); 602 cmdsp->cmd = cmd; 603 *cmdsp->cmd_dma = cpu_to_le64(cmd); 604 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 605 if (req->sba->hw_resp_size) { 606 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; 607 cmdsp->resp = resp_dma; 608 cmdsp->resp_len = req->sba->hw_resp_size; 609 } 610 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; 611 cmdsp->data = resp_dma; 612 cmdsp->data_len = req->sba->hw_resp_size; 613 cmdsp++; 614 615 /* Fillup brcm_message */ 616 msg->type = BRCM_MESSAGE_SBA; 617 msg->sba.cmds = cmds; 618 msg->sba.cmds_count = cmdsp - cmds; 619 msg->ctx = req; 620 msg->error = 0; 621 } 622 623 static struct dma_async_tx_descriptor * 624 sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags) 625 { 626 struct sba_request *req = NULL; 627 struct sba_device *sba = to_sba_device(dchan); 628 629 /* Alloc new request */ 630 req = sba_alloc_request(sba); 631 if (!req) 632 return NULL; 633 634 /* 635 * Force fence so that no requests are submitted 636 * until DMA callback for this request is invoked. 637 */ 638 req->flags |= SBA_REQUEST_FENCE; 639 640 /* Fillup request message */ 641 sba_fillup_interrupt_msg(req, req->cmds, &req->msg); 642 643 /* Init async_tx descriptor */ 644 req->tx.flags = flags; 645 req->tx.cookie = -EBUSY; 646 647 return &req->tx; 648 } 649 650 static void sba_fillup_memcpy_msg(struct sba_request *req, 651 struct brcm_sba_command *cmds, 652 struct brcm_message *msg, 653 dma_addr_t msg_offset, size_t msg_len, 654 dma_addr_t dst, dma_addr_t src) 655 { 656 u64 cmd; 657 u32 c_mdata; 658 dma_addr_t resp_dma = req->tx.phys; 659 struct brcm_sba_command *cmdsp = cmds; 660 661 /* Type-B command to load data into buf0 */ 662 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 663 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 664 cmd = sba_cmd_enc(cmd, msg_len, 665 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 666 c_mdata = sba_cmd_load_c_mdata(0); 667 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 668 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 669 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, 670 SBA_CMD_SHIFT, SBA_CMD_MASK); 671 cmdsp->cmd = cmd; 672 *cmdsp->cmd_dma = cpu_to_le64(cmd); 673 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 674 cmdsp->data = src + msg_offset; 675 cmdsp->data_len = msg_len; 676 cmdsp++; 677 678 /* Type-A command to write buf0 */ 679 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 680 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 681 cmd = sba_cmd_enc(cmd, msg_len, 682 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 683 cmd = sba_cmd_enc(cmd, 0x1, 684 SBA_RESP_SHIFT, SBA_RESP_MASK); 685 c_mdata = sba_cmd_write_c_mdata(0); 686 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 687 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 688 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, 689 SBA_CMD_SHIFT, SBA_CMD_MASK); 690 cmdsp->cmd = cmd; 691 *cmdsp->cmd_dma = cpu_to_le64(cmd); 692 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 693 if (req->sba->hw_resp_size) { 694 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; 695 cmdsp->resp = resp_dma; 696 cmdsp->resp_len = req->sba->hw_resp_size; 697 } 698 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; 699 cmdsp->data = dst + msg_offset; 700 cmdsp->data_len = msg_len; 701 cmdsp++; 702 703 /* Fillup brcm_message */ 704 msg->type = BRCM_MESSAGE_SBA; 705 msg->sba.cmds = cmds; 706 msg->sba.cmds_count = cmdsp - cmds; 707 msg->ctx = req; 708 msg->error = 0; 709 } 710 711 static struct sba_request * 712 sba_prep_dma_memcpy_req(struct sba_device *sba, 713 dma_addr_t off, dma_addr_t dst, dma_addr_t src, 714 size_t len, unsigned long flags) 715 { 716 struct sba_request *req = NULL; 717 718 /* Alloc new request */ 719 req = sba_alloc_request(sba); 720 if (!req) 721 return NULL; 722 if (flags & DMA_PREP_FENCE) 723 req->flags |= SBA_REQUEST_FENCE; 724 725 /* Fillup request message */ 726 sba_fillup_memcpy_msg(req, req->cmds, &req->msg, 727 off, len, dst, src); 728 729 /* Init async_tx descriptor */ 730 req->tx.flags = flags; 731 req->tx.cookie = -EBUSY; 732 733 return req; 734 } 735 736 static struct dma_async_tx_descriptor * 737 sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, 738 size_t len, unsigned long flags) 739 { 740 size_t req_len; 741 dma_addr_t off = 0; 742 struct sba_device *sba = to_sba_device(dchan); 743 struct sba_request *first = NULL, *req; 744 745 /* Create chained requests where each request is upto hw_buf_size */ 746 while (len) { 747 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; 748 749 req = sba_prep_dma_memcpy_req(sba, off, dst, src, 750 req_len, flags); 751 if (!req) { 752 if (first) 753 sba_free_chained_requests(first); 754 return NULL; 755 } 756 757 if (first) 758 sba_chain_request(first, req); 759 else 760 first = req; 761 762 off += req_len; 763 len -= req_len; 764 } 765 766 return (first) ? &first->tx : NULL; 767 } 768 769 static void sba_fillup_xor_msg(struct sba_request *req, 770 struct brcm_sba_command *cmds, 771 struct brcm_message *msg, 772 dma_addr_t msg_offset, size_t msg_len, 773 dma_addr_t dst, dma_addr_t *src, u32 src_cnt) 774 { 775 u64 cmd; 776 u32 c_mdata; 777 unsigned int i; 778 dma_addr_t resp_dma = req->tx.phys; 779 struct brcm_sba_command *cmdsp = cmds; 780 781 /* Type-B command to load data into buf0 */ 782 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 783 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 784 cmd = sba_cmd_enc(cmd, msg_len, 785 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 786 c_mdata = sba_cmd_load_c_mdata(0); 787 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 788 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 789 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, 790 SBA_CMD_SHIFT, SBA_CMD_MASK); 791 cmdsp->cmd = cmd; 792 *cmdsp->cmd_dma = cpu_to_le64(cmd); 793 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 794 cmdsp->data = src[0] + msg_offset; 795 cmdsp->data_len = msg_len; 796 cmdsp++; 797 798 /* Type-B commands to xor data with buf0 and put it back in buf0 */ 799 for (i = 1; i < src_cnt; i++) { 800 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 801 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 802 cmd = sba_cmd_enc(cmd, msg_len, 803 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 804 c_mdata = sba_cmd_xor_c_mdata(0, 0); 805 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 806 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 807 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, 808 SBA_CMD_SHIFT, SBA_CMD_MASK); 809 cmdsp->cmd = cmd; 810 *cmdsp->cmd_dma = cpu_to_le64(cmd); 811 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 812 cmdsp->data = src[i] + msg_offset; 813 cmdsp->data_len = msg_len; 814 cmdsp++; 815 } 816 817 /* Type-A command to write buf0 */ 818 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 819 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 820 cmd = sba_cmd_enc(cmd, msg_len, 821 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 822 cmd = sba_cmd_enc(cmd, 0x1, 823 SBA_RESP_SHIFT, SBA_RESP_MASK); 824 c_mdata = sba_cmd_write_c_mdata(0); 825 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 826 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 827 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, 828 SBA_CMD_SHIFT, SBA_CMD_MASK); 829 cmdsp->cmd = cmd; 830 *cmdsp->cmd_dma = cpu_to_le64(cmd); 831 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 832 if (req->sba->hw_resp_size) { 833 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; 834 cmdsp->resp = resp_dma; 835 cmdsp->resp_len = req->sba->hw_resp_size; 836 } 837 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; 838 cmdsp->data = dst + msg_offset; 839 cmdsp->data_len = msg_len; 840 cmdsp++; 841 842 /* Fillup brcm_message */ 843 msg->type = BRCM_MESSAGE_SBA; 844 msg->sba.cmds = cmds; 845 msg->sba.cmds_count = cmdsp - cmds; 846 msg->ctx = req; 847 msg->error = 0; 848 } 849 850 static struct sba_request * 851 sba_prep_dma_xor_req(struct sba_device *sba, 852 dma_addr_t off, dma_addr_t dst, dma_addr_t *src, 853 u32 src_cnt, size_t len, unsigned long flags) 854 { 855 struct sba_request *req = NULL; 856 857 /* Alloc new request */ 858 req = sba_alloc_request(sba); 859 if (!req) 860 return NULL; 861 if (flags & DMA_PREP_FENCE) 862 req->flags |= SBA_REQUEST_FENCE; 863 864 /* Fillup request message */ 865 sba_fillup_xor_msg(req, req->cmds, &req->msg, 866 off, len, dst, src, src_cnt); 867 868 /* Init async_tx descriptor */ 869 req->tx.flags = flags; 870 req->tx.cookie = -EBUSY; 871 872 return req; 873 } 874 875 static struct dma_async_tx_descriptor * 876 sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, 877 u32 src_cnt, size_t len, unsigned long flags) 878 { 879 size_t req_len; 880 dma_addr_t off = 0; 881 struct sba_device *sba = to_sba_device(dchan); 882 struct sba_request *first = NULL, *req; 883 884 /* Sanity checks */ 885 if (unlikely(src_cnt > sba->max_xor_srcs)) 886 return NULL; 887 888 /* Create chained requests where each request is upto hw_buf_size */ 889 while (len) { 890 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; 891 892 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt, 893 req_len, flags); 894 if (!req) { 895 if (first) 896 sba_free_chained_requests(first); 897 return NULL; 898 } 899 900 if (first) 901 sba_chain_request(first, req); 902 else 903 first = req; 904 905 off += req_len; 906 len -= req_len; 907 } 908 909 return (first) ? &first->tx : NULL; 910 } 911 912 static void sba_fillup_pq_msg(struct sba_request *req, 913 bool pq_continue, 914 struct brcm_sba_command *cmds, 915 struct brcm_message *msg, 916 dma_addr_t msg_offset, size_t msg_len, 917 dma_addr_t *dst_p, dma_addr_t *dst_q, 918 const u8 *scf, dma_addr_t *src, u32 src_cnt) 919 { 920 u64 cmd; 921 u32 c_mdata; 922 unsigned int i; 923 dma_addr_t resp_dma = req->tx.phys; 924 struct brcm_sba_command *cmdsp = cmds; 925 926 if (pq_continue) { 927 /* Type-B command to load old P into buf0 */ 928 if (dst_p) { 929 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 930 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 931 cmd = sba_cmd_enc(cmd, msg_len, 932 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 933 c_mdata = sba_cmd_load_c_mdata(0); 934 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 935 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 936 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, 937 SBA_CMD_SHIFT, SBA_CMD_MASK); 938 cmdsp->cmd = cmd; 939 *cmdsp->cmd_dma = cpu_to_le64(cmd); 940 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 941 cmdsp->data = *dst_p + msg_offset; 942 cmdsp->data_len = msg_len; 943 cmdsp++; 944 } 945 946 /* Type-B command to load old Q into buf1 */ 947 if (dst_q) { 948 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 949 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 950 cmd = sba_cmd_enc(cmd, msg_len, 951 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 952 c_mdata = sba_cmd_load_c_mdata(1); 953 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 954 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 955 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, 956 SBA_CMD_SHIFT, SBA_CMD_MASK); 957 cmdsp->cmd = cmd; 958 *cmdsp->cmd_dma = cpu_to_le64(cmd); 959 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 960 cmdsp->data = *dst_q + msg_offset; 961 cmdsp->data_len = msg_len; 962 cmdsp++; 963 } 964 } else { 965 /* Type-A command to zero all buffers */ 966 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 967 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 968 cmd = sba_cmd_enc(cmd, msg_len, 969 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 970 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS, 971 SBA_CMD_SHIFT, SBA_CMD_MASK); 972 cmdsp->cmd = cmd; 973 *cmdsp->cmd_dma = cpu_to_le64(cmd); 974 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 975 cmdsp++; 976 } 977 978 /* Type-B commands for generate P onto buf0 and Q onto buf1 */ 979 for (i = 0; i < src_cnt; i++) { 980 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 981 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 982 cmd = sba_cmd_enc(cmd, msg_len, 983 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 984 c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0); 985 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 986 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 987 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), 988 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); 989 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR, 990 SBA_CMD_SHIFT, SBA_CMD_MASK); 991 cmdsp->cmd = cmd; 992 *cmdsp->cmd_dma = cpu_to_le64(cmd); 993 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 994 cmdsp->data = src[i] + msg_offset; 995 cmdsp->data_len = msg_len; 996 cmdsp++; 997 } 998 999 /* Type-A command to write buf0 */ 1000 if (dst_p) { 1001 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 1002 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1003 cmd = sba_cmd_enc(cmd, msg_len, 1004 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1005 cmd = sba_cmd_enc(cmd, 0x1, 1006 SBA_RESP_SHIFT, SBA_RESP_MASK); 1007 c_mdata = sba_cmd_write_c_mdata(0); 1008 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1009 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1010 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, 1011 SBA_CMD_SHIFT, SBA_CMD_MASK); 1012 cmdsp->cmd = cmd; 1013 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1014 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 1015 if (req->sba->hw_resp_size) { 1016 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; 1017 cmdsp->resp = resp_dma; 1018 cmdsp->resp_len = req->sba->hw_resp_size; 1019 } 1020 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; 1021 cmdsp->data = *dst_p + msg_offset; 1022 cmdsp->data_len = msg_len; 1023 cmdsp++; 1024 } 1025 1026 /* Type-A command to write buf1 */ 1027 if (dst_q) { 1028 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 1029 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1030 cmd = sba_cmd_enc(cmd, msg_len, 1031 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1032 cmd = sba_cmd_enc(cmd, 0x1, 1033 SBA_RESP_SHIFT, SBA_RESP_MASK); 1034 c_mdata = sba_cmd_write_c_mdata(1); 1035 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1036 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1037 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, 1038 SBA_CMD_SHIFT, SBA_CMD_MASK); 1039 cmdsp->cmd = cmd; 1040 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1041 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 1042 if (req->sba->hw_resp_size) { 1043 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; 1044 cmdsp->resp = resp_dma; 1045 cmdsp->resp_len = req->sba->hw_resp_size; 1046 } 1047 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; 1048 cmdsp->data = *dst_q + msg_offset; 1049 cmdsp->data_len = msg_len; 1050 cmdsp++; 1051 } 1052 1053 /* Fillup brcm_message */ 1054 msg->type = BRCM_MESSAGE_SBA; 1055 msg->sba.cmds = cmds; 1056 msg->sba.cmds_count = cmdsp - cmds; 1057 msg->ctx = req; 1058 msg->error = 0; 1059 } 1060 1061 static struct sba_request * 1062 sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, 1063 dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src, 1064 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) 1065 { 1066 struct sba_request *req = NULL; 1067 1068 /* Alloc new request */ 1069 req = sba_alloc_request(sba); 1070 if (!req) 1071 return NULL; 1072 if (flags & DMA_PREP_FENCE) 1073 req->flags |= SBA_REQUEST_FENCE; 1074 1075 /* Fillup request messages */ 1076 sba_fillup_pq_msg(req, dmaf_continue(flags), 1077 req->cmds, &req->msg, 1078 off, len, dst_p, dst_q, scf, src, src_cnt); 1079 1080 /* Init async_tx descriptor */ 1081 req->tx.flags = flags; 1082 req->tx.cookie = -EBUSY; 1083 1084 return req; 1085 } 1086 1087 static void sba_fillup_pq_single_msg(struct sba_request *req, 1088 bool pq_continue, 1089 struct brcm_sba_command *cmds, 1090 struct brcm_message *msg, 1091 dma_addr_t msg_offset, size_t msg_len, 1092 dma_addr_t *dst_p, dma_addr_t *dst_q, 1093 dma_addr_t src, u8 scf) 1094 { 1095 u64 cmd; 1096 u32 c_mdata; 1097 u8 pos, dpos = raid6_gflog[scf]; 1098 dma_addr_t resp_dma = req->tx.phys; 1099 struct brcm_sba_command *cmdsp = cmds; 1100 1101 if (!dst_p) 1102 goto skip_p; 1103 1104 if (pq_continue) { 1105 /* Type-B command to load old P into buf0 */ 1106 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 1107 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1108 cmd = sba_cmd_enc(cmd, msg_len, 1109 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1110 c_mdata = sba_cmd_load_c_mdata(0); 1111 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1112 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1113 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, 1114 SBA_CMD_SHIFT, SBA_CMD_MASK); 1115 cmdsp->cmd = cmd; 1116 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1117 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 1118 cmdsp->data = *dst_p + msg_offset; 1119 cmdsp->data_len = msg_len; 1120 cmdsp++; 1121 1122 /* 1123 * Type-B commands to xor data with buf0 and put it 1124 * back in buf0 1125 */ 1126 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 1127 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1128 cmd = sba_cmd_enc(cmd, msg_len, 1129 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1130 c_mdata = sba_cmd_xor_c_mdata(0, 0); 1131 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1132 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1133 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, 1134 SBA_CMD_SHIFT, SBA_CMD_MASK); 1135 cmdsp->cmd = cmd; 1136 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1137 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 1138 cmdsp->data = src + msg_offset; 1139 cmdsp->data_len = msg_len; 1140 cmdsp++; 1141 } else { 1142 /* Type-B command to load old P into buf0 */ 1143 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 1144 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1145 cmd = sba_cmd_enc(cmd, msg_len, 1146 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1147 c_mdata = sba_cmd_load_c_mdata(0); 1148 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1149 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1150 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, 1151 SBA_CMD_SHIFT, SBA_CMD_MASK); 1152 cmdsp->cmd = cmd; 1153 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1154 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 1155 cmdsp->data = src + msg_offset; 1156 cmdsp->data_len = msg_len; 1157 cmdsp++; 1158 } 1159 1160 /* Type-A command to write buf0 */ 1161 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 1162 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1163 cmd = sba_cmd_enc(cmd, msg_len, 1164 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1165 cmd = sba_cmd_enc(cmd, 0x1, 1166 SBA_RESP_SHIFT, SBA_RESP_MASK); 1167 c_mdata = sba_cmd_write_c_mdata(0); 1168 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1169 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1170 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, 1171 SBA_CMD_SHIFT, SBA_CMD_MASK); 1172 cmdsp->cmd = cmd; 1173 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1174 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 1175 if (req->sba->hw_resp_size) { 1176 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; 1177 cmdsp->resp = resp_dma; 1178 cmdsp->resp_len = req->sba->hw_resp_size; 1179 } 1180 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; 1181 cmdsp->data = *dst_p + msg_offset; 1182 cmdsp->data_len = msg_len; 1183 cmdsp++; 1184 1185 skip_p: 1186 if (!dst_q) 1187 goto skip_q; 1188 1189 /* Type-A command to zero all buffers */ 1190 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 1191 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1192 cmd = sba_cmd_enc(cmd, msg_len, 1193 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1194 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS, 1195 SBA_CMD_SHIFT, SBA_CMD_MASK); 1196 cmdsp->cmd = cmd; 1197 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1198 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 1199 cmdsp++; 1200 1201 if (dpos == 255) 1202 goto skip_q_computation; 1203 pos = (dpos < req->sba->max_pq_coefs) ? 1204 dpos : (req->sba->max_pq_coefs - 1); 1205 1206 /* 1207 * Type-B command to generate initial Q from data 1208 * and store output into buf0 1209 */ 1210 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 1211 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1212 cmd = sba_cmd_enc(cmd, msg_len, 1213 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1214 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0); 1215 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1216 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1217 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), 1218 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); 1219 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, 1220 SBA_CMD_SHIFT, SBA_CMD_MASK); 1221 cmdsp->cmd = cmd; 1222 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1223 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 1224 cmdsp->data = src + msg_offset; 1225 cmdsp->data_len = msg_len; 1226 cmdsp++; 1227 1228 dpos -= pos; 1229 1230 /* Multiple Type-A command to generate final Q */ 1231 while (dpos) { 1232 pos = (dpos < req->sba->max_pq_coefs) ? 1233 dpos : (req->sba->max_pq_coefs - 1); 1234 1235 /* 1236 * Type-A command to generate Q with buf0 and 1237 * buf1 store result in buf0 1238 */ 1239 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 1240 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1241 cmd = sba_cmd_enc(cmd, msg_len, 1242 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1243 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1); 1244 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1245 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1246 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), 1247 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); 1248 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, 1249 SBA_CMD_SHIFT, SBA_CMD_MASK); 1250 cmdsp->cmd = cmd; 1251 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1252 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 1253 cmdsp++; 1254 1255 dpos -= pos; 1256 } 1257 1258 skip_q_computation: 1259 if (pq_continue) { 1260 /* 1261 * Type-B command to XOR previous output with 1262 * buf0 and write it into buf0 1263 */ 1264 cmd = sba_cmd_enc(0x0, SBA_TYPE_B, 1265 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1266 cmd = sba_cmd_enc(cmd, msg_len, 1267 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1268 c_mdata = sba_cmd_xor_c_mdata(0, 0); 1269 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1270 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1271 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, 1272 SBA_CMD_SHIFT, SBA_CMD_MASK); 1273 cmdsp->cmd = cmd; 1274 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1275 cmdsp->flags = BRCM_SBA_CMD_TYPE_B; 1276 cmdsp->data = *dst_q + msg_offset; 1277 cmdsp->data_len = msg_len; 1278 cmdsp++; 1279 } 1280 1281 /* Type-A command to write buf0 */ 1282 cmd = sba_cmd_enc(0x0, SBA_TYPE_A, 1283 SBA_TYPE_SHIFT, SBA_TYPE_MASK); 1284 cmd = sba_cmd_enc(cmd, msg_len, 1285 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); 1286 cmd = sba_cmd_enc(cmd, 0x1, 1287 SBA_RESP_SHIFT, SBA_RESP_MASK); 1288 c_mdata = sba_cmd_write_c_mdata(0); 1289 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), 1290 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); 1291 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, 1292 SBA_CMD_SHIFT, SBA_CMD_MASK); 1293 cmdsp->cmd = cmd; 1294 *cmdsp->cmd_dma = cpu_to_le64(cmd); 1295 cmdsp->flags = BRCM_SBA_CMD_TYPE_A; 1296 if (req->sba->hw_resp_size) { 1297 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; 1298 cmdsp->resp = resp_dma; 1299 cmdsp->resp_len = req->sba->hw_resp_size; 1300 } 1301 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; 1302 cmdsp->data = *dst_q + msg_offset; 1303 cmdsp->data_len = msg_len; 1304 cmdsp++; 1305 1306 skip_q: 1307 /* Fillup brcm_message */ 1308 msg->type = BRCM_MESSAGE_SBA; 1309 msg->sba.cmds = cmds; 1310 msg->sba.cmds_count = cmdsp - cmds; 1311 msg->ctx = req; 1312 msg->error = 0; 1313 } 1314 1315 static struct sba_request * 1316 sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, 1317 dma_addr_t *dst_p, dma_addr_t *dst_q, 1318 dma_addr_t src, u8 scf, size_t len, 1319 unsigned long flags) 1320 { 1321 struct sba_request *req = NULL; 1322 1323 /* Alloc new request */ 1324 req = sba_alloc_request(sba); 1325 if (!req) 1326 return NULL; 1327 if (flags & DMA_PREP_FENCE) 1328 req->flags |= SBA_REQUEST_FENCE; 1329 1330 /* Fillup request messages */ 1331 sba_fillup_pq_single_msg(req, dmaf_continue(flags), 1332 req->cmds, &req->msg, off, len, 1333 dst_p, dst_q, src, scf); 1334 1335 /* Init async_tx descriptor */ 1336 req->tx.flags = flags; 1337 req->tx.cookie = -EBUSY; 1338 1339 return req; 1340 } 1341 1342 static struct dma_async_tx_descriptor * 1343 sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src, 1344 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) 1345 { 1346 u32 i, dst_q_index; 1347 size_t req_len; 1348 bool slow = false; 1349 dma_addr_t off = 0; 1350 dma_addr_t *dst_p = NULL, *dst_q = NULL; 1351 struct sba_device *sba = to_sba_device(dchan); 1352 struct sba_request *first = NULL, *req; 1353 1354 /* Sanity checks */ 1355 if (unlikely(src_cnt > sba->max_pq_srcs)) 1356 return NULL; 1357 for (i = 0; i < src_cnt; i++) 1358 if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) 1359 slow = true; 1360 1361 /* Figure-out P and Q destination addresses */ 1362 if (!(flags & DMA_PREP_PQ_DISABLE_P)) 1363 dst_p = &dst[0]; 1364 if (!(flags & DMA_PREP_PQ_DISABLE_Q)) 1365 dst_q = &dst[1]; 1366 1367 /* Create chained requests where each request is upto hw_buf_size */ 1368 while (len) { 1369 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; 1370 1371 if (slow) { 1372 dst_q_index = src_cnt; 1373 1374 if (dst_q) { 1375 for (i = 0; i < src_cnt; i++) { 1376 if (*dst_q == src[i]) { 1377 dst_q_index = i; 1378 break; 1379 } 1380 } 1381 } 1382 1383 if (dst_q_index < src_cnt) { 1384 i = dst_q_index; 1385 req = sba_prep_dma_pq_single_req(sba, 1386 off, dst_p, dst_q, src[i], scf[i], 1387 req_len, flags | DMA_PREP_FENCE); 1388 if (!req) 1389 goto fail; 1390 1391 if (first) 1392 sba_chain_request(first, req); 1393 else 1394 first = req; 1395 1396 flags |= DMA_PREP_CONTINUE; 1397 } 1398 1399 for (i = 0; i < src_cnt; i++) { 1400 if (dst_q_index == i) 1401 continue; 1402 1403 req = sba_prep_dma_pq_single_req(sba, 1404 off, dst_p, dst_q, src[i], scf[i], 1405 req_len, flags | DMA_PREP_FENCE); 1406 if (!req) 1407 goto fail; 1408 1409 if (first) 1410 sba_chain_request(first, req); 1411 else 1412 first = req; 1413 1414 flags |= DMA_PREP_CONTINUE; 1415 } 1416 } else { 1417 req = sba_prep_dma_pq_req(sba, off, 1418 dst_p, dst_q, src, src_cnt, 1419 scf, req_len, flags); 1420 if (!req) 1421 goto fail; 1422 1423 if (first) 1424 sba_chain_request(first, req); 1425 else 1426 first = req; 1427 } 1428 1429 off += req_len; 1430 len -= req_len; 1431 } 1432 1433 return (first) ? &first->tx : NULL; 1434 1435 fail: 1436 if (first) 1437 sba_free_chained_requests(first); 1438 return NULL; 1439 } 1440 1441 /* ====== Mailbox callbacks ===== */ 1442 1443 static void sba_receive_message(struct mbox_client *cl, void *msg) 1444 { 1445 struct brcm_message *m = msg; 1446 struct sba_request *req = m->ctx; 1447 struct sba_device *sba = req->sba; 1448 1449 /* Error count if message has error */ 1450 if (m->error < 0) 1451 dev_err(sba->dev, "%s got message with error %d", 1452 dma_chan_name(&sba->dma_chan), m->error); 1453 1454 /* Process received request */ 1455 sba_process_received_request(sba, req); 1456 } 1457 1458 /* ====== Debugfs callbacks ====== */ 1459 1460 static int sba_debugfs_stats_show(struct seq_file *file, void *offset) 1461 { 1462 struct sba_device *sba = dev_get_drvdata(file->private); 1463 1464 /* Write stats in file */ 1465 sba_write_stats_in_seqfile(sba, file); 1466 1467 return 0; 1468 } 1469 1470 /* ====== Platform driver routines ===== */ 1471 1472 static int sba_prealloc_channel_resources(struct sba_device *sba) 1473 { 1474 int i, j, ret = 0; 1475 struct sba_request *req = NULL; 1476 1477 sba->resp_base = dma_alloc_coherent(sba->mbox_dev, 1478 sba->max_resp_pool_size, 1479 &sba->resp_dma_base, GFP_KERNEL); 1480 if (!sba->resp_base) 1481 return -ENOMEM; 1482 1483 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, 1484 sba->max_cmds_pool_size, 1485 &sba->cmds_dma_base, GFP_KERNEL); 1486 if (!sba->cmds_base) { 1487 ret = -ENOMEM; 1488 goto fail_free_resp_pool; 1489 } 1490 1491 spin_lock_init(&sba->reqs_lock); 1492 sba->reqs_fence = false; 1493 INIT_LIST_HEAD(&sba->reqs_alloc_list); 1494 INIT_LIST_HEAD(&sba->reqs_pending_list); 1495 INIT_LIST_HEAD(&sba->reqs_active_list); 1496 INIT_LIST_HEAD(&sba->reqs_aborted_list); 1497 INIT_LIST_HEAD(&sba->reqs_free_list); 1498 1499 for (i = 0; i < sba->max_req; i++) { 1500 req = devm_kzalloc(sba->dev, 1501 struct_size(req, cmds, sba->max_cmd_per_req), 1502 GFP_KERNEL); 1503 if (!req) { 1504 ret = -ENOMEM; 1505 goto fail_free_cmds_pool; 1506 } 1507 INIT_LIST_HEAD(&req->node); 1508 req->sba = sba; 1509 req->flags = SBA_REQUEST_STATE_FREE; 1510 INIT_LIST_HEAD(&req->next); 1511 atomic_set(&req->next_pending_count, 0); 1512 for (j = 0; j < sba->max_cmd_per_req; j++) { 1513 req->cmds[j].cmd = 0; 1514 req->cmds[j].cmd_dma = sba->cmds_base + 1515 (i * sba->max_cmd_per_req + j) * sizeof(u64); 1516 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base + 1517 (i * sba->max_cmd_per_req + j) * sizeof(u64); 1518 req->cmds[j].flags = 0; 1519 } 1520 memset(&req->msg, 0, sizeof(req->msg)); 1521 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); 1522 async_tx_ack(&req->tx); 1523 req->tx.tx_submit = sba_tx_submit; 1524 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size; 1525 list_add_tail(&req->node, &sba->reqs_free_list); 1526 } 1527 1528 return 0; 1529 1530 fail_free_cmds_pool: 1531 dma_free_coherent(sba->mbox_dev, 1532 sba->max_cmds_pool_size, 1533 sba->cmds_base, sba->cmds_dma_base); 1534 fail_free_resp_pool: 1535 dma_free_coherent(sba->mbox_dev, 1536 sba->max_resp_pool_size, 1537 sba->resp_base, sba->resp_dma_base); 1538 return ret; 1539 } 1540 1541 static void sba_freeup_channel_resources(struct sba_device *sba) 1542 { 1543 dmaengine_terminate_all(&sba->dma_chan); 1544 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size, 1545 sba->cmds_base, sba->cmds_dma_base); 1546 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size, 1547 sba->resp_base, sba->resp_dma_base); 1548 sba->resp_base = NULL; 1549 sba->resp_dma_base = 0; 1550 } 1551 1552 static int sba_async_register(struct sba_device *sba) 1553 { 1554 int ret; 1555 struct dma_device *dma_dev = &sba->dma_dev; 1556 1557 /* Initialize DMA channel cookie */ 1558 sba->dma_chan.device = dma_dev; 1559 dma_cookie_init(&sba->dma_chan); 1560 1561 /* Initialize DMA device capability mask */ 1562 dma_cap_zero(dma_dev->cap_mask); 1563 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); 1564 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 1565 dma_cap_set(DMA_XOR, dma_dev->cap_mask); 1566 dma_cap_set(DMA_PQ, dma_dev->cap_mask); 1567 1568 /* 1569 * Set mailbox channel device as the base device of 1570 * our dma_device because the actual memory accesses 1571 * will be done by mailbox controller 1572 */ 1573 dma_dev->dev = sba->mbox_dev; 1574 1575 /* Set base prep routines */ 1576 dma_dev->device_free_chan_resources = sba_free_chan_resources; 1577 dma_dev->device_terminate_all = sba_device_terminate_all; 1578 dma_dev->device_issue_pending = sba_issue_pending; 1579 dma_dev->device_tx_status = sba_tx_status; 1580 1581 /* Set interrupt routine */ 1582 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) 1583 dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt; 1584 1585 /* Set memcpy routine */ 1586 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) 1587 dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy; 1588 1589 /* Set xor routine and capability */ 1590 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1591 dma_dev->device_prep_dma_xor = sba_prep_dma_xor; 1592 dma_dev->max_xor = sba->max_xor_srcs; 1593 } 1594 1595 /* Set pq routine and capability */ 1596 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { 1597 dma_dev->device_prep_dma_pq = sba_prep_dma_pq; 1598 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0); 1599 } 1600 1601 /* Initialize DMA device channel list */ 1602 INIT_LIST_HEAD(&dma_dev->channels); 1603 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels); 1604 1605 /* Register with Linux async DMA framework*/ 1606 ret = dma_async_device_register(dma_dev); 1607 if (ret) { 1608 dev_err(sba->dev, "async device register error %d", ret); 1609 return ret; 1610 } 1611 1612 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n", 1613 dma_chan_name(&sba->dma_chan), 1614 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "", 1615 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "", 1616 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1617 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : ""); 1618 1619 return 0; 1620 } 1621 1622 static int sba_probe(struct platform_device *pdev) 1623 { 1624 int ret = 0; 1625 struct sba_device *sba; 1626 struct platform_device *mbox_pdev; 1627 struct of_phandle_args args; 1628 1629 /* Allocate main SBA struct */ 1630 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL); 1631 if (!sba) 1632 return -ENOMEM; 1633 1634 sba->dev = &pdev->dev; 1635 platform_set_drvdata(pdev, sba); 1636 1637 /* Number of mailbox channels should be atleast 1 */ 1638 ret = of_count_phandle_with_args(pdev->dev.of_node, 1639 "mboxes", "#mbox-cells"); 1640 if (ret <= 0) 1641 return -ENODEV; 1642 1643 /* Determine SBA version from DT compatible string */ 1644 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) 1645 sba->ver = SBA_VER_1; 1646 else if (of_device_is_compatible(sba->dev->of_node, 1647 "brcm,iproc-sba-v2")) 1648 sba->ver = SBA_VER_2; 1649 else 1650 return -ENODEV; 1651 1652 /* Derived Configuration parameters */ 1653 switch (sba->ver) { 1654 case SBA_VER_1: 1655 sba->hw_buf_size = 4096; 1656 sba->hw_resp_size = 8; 1657 sba->max_pq_coefs = 6; 1658 sba->max_pq_srcs = 6; 1659 break; 1660 case SBA_VER_2: 1661 sba->hw_buf_size = 4096; 1662 sba->hw_resp_size = 8; 1663 sba->max_pq_coefs = 30; 1664 /* 1665 * We can support max_pq_srcs == max_pq_coefs because 1666 * we are limited by number of SBA commands that we can 1667 * fit in one message for underlying ring manager HW. 1668 */ 1669 sba->max_pq_srcs = 12; 1670 break; 1671 default: 1672 return -EINVAL; 1673 } 1674 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL; 1675 sba->max_cmd_per_req = sba->max_pq_srcs + 3; 1676 sba->max_xor_srcs = sba->max_cmd_per_req - 1; 1677 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; 1678 sba->max_cmds_pool_size = sba->max_req * 1679 sba->max_cmd_per_req * sizeof(u64); 1680 1681 /* Setup mailbox client */ 1682 sba->client.dev = &pdev->dev; 1683 sba->client.rx_callback = sba_receive_message; 1684 sba->client.tx_block = false; 1685 sba->client.knows_txdone = true; 1686 sba->client.tx_tout = 0; 1687 1688 /* Request mailbox channel */ 1689 sba->mchan = mbox_request_channel(&sba->client, 0); 1690 if (IS_ERR(sba->mchan)) { 1691 ret = PTR_ERR(sba->mchan); 1692 goto fail_free_mchan; 1693 } 1694 1695 /* Find-out underlying mailbox device */ 1696 ret = of_parse_phandle_with_args(pdev->dev.of_node, 1697 "mboxes", "#mbox-cells", 0, &args); 1698 if (ret) 1699 goto fail_free_mchan; 1700 mbox_pdev = of_find_device_by_node(args.np); 1701 of_node_put(args.np); 1702 if (!mbox_pdev) { 1703 ret = -ENODEV; 1704 goto fail_free_mchan; 1705 } 1706 sba->mbox_dev = &mbox_pdev->dev; 1707 1708 /* Prealloc channel resource */ 1709 ret = sba_prealloc_channel_resources(sba); 1710 if (ret) 1711 goto fail_free_mchan; 1712 1713 /* Check availability of debugfs */ 1714 if (!debugfs_initialized()) 1715 goto skip_debugfs; 1716 1717 /* Create debugfs root entry */ 1718 sba->root = debugfs_create_dir(dev_name(sba->dev), NULL); 1719 if (IS_ERR_OR_NULL(sba->root)) { 1720 dev_err(sba->dev, "failed to create debugfs root entry\n"); 1721 sba->root = NULL; 1722 goto skip_debugfs; 1723 } 1724 1725 /* Create debugfs stats entry */ 1726 sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root, 1727 sba_debugfs_stats_show); 1728 if (IS_ERR_OR_NULL(sba->stats)) 1729 dev_err(sba->dev, "failed to create debugfs stats file\n"); 1730 skip_debugfs: 1731 1732 /* Register DMA device with Linux async framework */ 1733 ret = sba_async_register(sba); 1734 if (ret) 1735 goto fail_free_resources; 1736 1737 /* Print device info */ 1738 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s", 1739 dma_chan_name(&sba->dma_chan), sba->ver+1, 1740 dev_name(sba->mbox_dev)); 1741 1742 return 0; 1743 1744 fail_free_resources: 1745 debugfs_remove_recursive(sba->root); 1746 sba_freeup_channel_resources(sba); 1747 fail_free_mchan: 1748 mbox_free_channel(sba->mchan); 1749 return ret; 1750 } 1751 1752 static int sba_remove(struct platform_device *pdev) 1753 { 1754 struct sba_device *sba = platform_get_drvdata(pdev); 1755 1756 dma_async_device_unregister(&sba->dma_dev); 1757 1758 debugfs_remove_recursive(sba->root); 1759 1760 sba_freeup_channel_resources(sba); 1761 1762 mbox_free_channel(sba->mchan); 1763 1764 return 0; 1765 } 1766 1767 static const struct of_device_id sba_of_match[] = { 1768 { .compatible = "brcm,iproc-sba", }, 1769 { .compatible = "brcm,iproc-sba-v2", }, 1770 {}, 1771 }; 1772 MODULE_DEVICE_TABLE(of, sba_of_match); 1773 1774 static struct platform_driver sba_driver = { 1775 .probe = sba_probe, 1776 .remove = sba_remove, 1777 .driver = { 1778 .name = "bcm-sba-raid", 1779 .of_match_table = sba_of_match, 1780 }, 1781 }; 1782 module_platform_driver(sba_driver); 1783 1784 MODULE_DESCRIPTION("Broadcom SBA RAID driver"); 1785 MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>"); 1786 MODULE_LICENSE("GPL v2"); 1787