/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same Broadcom SoC specific ring manager, hence the ring
 * manager driver is implemented as a mailbox controller driver and
 * offload engine drivers are implemented as mailbox clients.
 *
 * Typically, a Broadcom SoC specific ring manager implements a larger
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but all offload operations supported by SBA can be broken down into
 * multiple small-size requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except submitting requests to SBA hardware devices via mailbox channels.
 * This driver implements a DMA device with one DMA channel using a set
 * of mailbox channels provided by the Broadcom SoC specific ring manager
 * driver. To exploit parallelism (as described above), all DMA requests
 * coming to the SBA RAID DMA channel are broken down into smaller requests
 * and submitted to multiple mailbox channels in round-robin fashion.
 * For more SBA DMA channels, we can create more SBA device nodes in the
 * Broadcom SoC specific DTS based on the number of hardware rings
 * supported by the Broadcom SoC ring manager.
 */

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>

#include "dmaengine.h"

/* SBA command related defines */
#define SBA_TYPE_SHIFT				48
#define SBA_TYPE_MASK				GENMASK(1, 0)
#define SBA_TYPE_A				0x0
#define SBA_TYPE_B				0x2
#define SBA_TYPE_C				0x3
#define SBA_USER_DEF_SHIFT			32
#define SBA_USER_DEF_MASK			GENMASK(15, 0)
#define SBA_R_MDATA_SHIFT			24
#define SBA_R_MDATA_MASK			GENMASK(7, 0)
#define SBA_C_MDATA_MS_SHIFT			18
#define SBA_C_MDATA_MS_MASK			GENMASK(1, 0)
#define SBA_INT_SHIFT				17
#define SBA_INT_MASK				BIT(0)
#define SBA_RESP_SHIFT				16
#define SBA_RESP_MASK				BIT(0)
#define SBA_C_MDATA_SHIFT			8
#define SBA_C_MDATA_MASK			GENMASK(7, 0)
#define SBA_C_MDATA_BNUMx_SHIFT(__bnum)		(2 * (__bnum))
#define SBA_C_MDATA_BNUMx_MASK			GENMASK(1, 0)
#define SBA_C_MDATA_DNUM_SHIFT			5
#define SBA_C_MDATA_DNUM_MASK			GENMASK(4, 0)
#define SBA_C_MDATA_LS(__v)			((__v) & 0xff)
#define SBA_C_MDATA_MS(__v)			(((__v) >> 8) & 0x3)
#define SBA_CMD_SHIFT				0
#define SBA_CMD_MASK				GENMASK(3, 0)
#define SBA_CMD_ZERO_BUFFER			0x4
#define SBA_CMD_ZERO_ALL_BUFFERS		0x8
#define SBA_CMD_LOAD_BUFFER			0x9
#define SBA_CMD_XOR				0xa
#define SBA_CMD_GALOIS_XOR			0xb
#define SBA_CMD_WRITE_BUFFER			0xc
#define SBA_CMD_GALOIS				0xe
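/*
 * Added commentary: a sketch of how the shifts and masks above tile the
 * 64-bit SBA command word (field widths follow the GENMASK()s):
 *
 *	bits 49:48	TYPE		(SBA_TYPE_A/B/C)
 *	bits 47:32	USER_DEF	(carries the transfer length)
 *	bits 31:24	R_MDATA
 *	bits 19:18	C_MDATA_MS	(upper two bits of C_MDATA)
 *	bit  17		INT		(interrupt enable)
 *	bit  16		RESP		(response enable)
 *	bits 15:8	C_MDATA		(lower eight bits)
 *	bits  3:0	CMD		(opcode, e.g. SBA_CMD_LOAD_BUFFER)
 *
 * sba_cmd_enc() below masks a field value and ORs it into position:
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 */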
/* Driver helper macros */
#define to_sba_request(tx)		\
	container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan)		\
	container_of(dchan, struct sba_device, dma_chan)

enum sba_request_state {
	SBA_REQUEST_STATE_FREE = 1,
	SBA_REQUEST_STATE_ALLOCED = 2,
	SBA_REQUEST_STATE_PENDING = 3,
	SBA_REQUEST_STATE_ACTIVE = 4,
	SBA_REQUEST_STATE_RECEIVED = 5,
	SBA_REQUEST_STATE_COMPLETED = 6,
	SBA_REQUEST_STATE_ABORTED = 7,
};
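/*
 * Added commentary: a request normally moves FREE -> ALLOCED (prep),
 * ALLOCED -> PENDING (tx_submit), PENDING -> ACTIVE (issue_pending),
 * ACTIVE -> RECEIVED (mailbox rx callback), then RECEIVED -> COMPLETED
 * while waiting for 'ack' or straight back to FREE once acked. ABORTED
 * is entered only when active requests are cleaned up. Each state has a
 * matching list in struct sba_device below, and all transitions happen
 * under sba->reqs_lock.
 */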
struct sba_request {
	/* Global state */
	struct list_head node;
	struct sba_device *sba;
	enum sba_request_state state;
	bool fence;
	/* Chained requests management */
	struct sba_request *first;
	struct list_head next;
	unsigned int next_count;
	atomic_t next_pending_count;
	/* BRCM message data */
	void *resp;
	dma_addr_t resp_dma;
	struct brcm_sba_command *cmds;
	struct brcm_message msg;
	struct dma_async_tx_descriptor tx;
};

enum sba_version {
	SBA_VER_1 = 0,
	SBA_VER_2
};

struct sba_device {
	/* Underlying device */
	struct device *dev;
	/* DT configuration parameters */
	enum sba_version ver;
	/* Derived configuration parameters */
	u32 max_req;
	u32 hw_buf_size;
	u32 hw_resp_size;
	u32 max_pq_coefs;
	u32 max_pq_srcs;
	u32 max_cmd_per_req;
	u32 max_xor_srcs;
	u32 max_resp_pool_size;
	u32 max_cmds_pool_size;
	/* Mailbox client and Mailbox channels */
	struct mbox_client client;
	int mchans_count;
	atomic_t mchans_current;
	struct mbox_chan **mchans;
	struct device *mbox_dev;
	/* DMA device and DMA channel */
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	/* DMA channel resources */
	void *resp_base;
	dma_addr_t resp_dma_base;
	void *cmds_base;
	dma_addr_t cmds_dma_base;
	spinlock_t reqs_lock;
	struct sba_request *reqs;
	bool reqs_fence;
	struct list_head reqs_alloc_list;
	struct list_head reqs_pending_list;
	struct list_head reqs_active_list;
	struct list_head reqs_received_list;
	struct list_head reqs_completed_list;
	struct list_head reqs_aborted_list;
	struct list_head reqs_free_list;
	int reqs_free_count;
};

/* ====== SBA command helper routines ===== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
	cmd &= ~((u64)mask << shift);
	cmd |= ((u64)(val & mask) << shift);
	return cmd;
}

static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
	return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
	return (b0 & SBA_C_MDATA_BNUMx_MASK) |
	       ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
	       ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}
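/*
 * Worked example (added commentary): for a Galois multiply command with
 * coefficient exponent d = 6 operating on buffers b1 = 1 and b0 = 0,
 * sba_cmd_pq_c_mdata(6, 1, 0) packs DNUM into bits 9:5 and the buffer
 * numbers into bits 3:2 and 1:0, giving 0xc4. SBA_C_MDATA_LS() then
 * yields the low byte (0xc4) for the C_MDATA field and SBA_C_MDATA_MS()
 * the upper two bits (0x0) for the C_MDATA_MS field.
 */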
/* ====== Channel resource management routines ===== */

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req = NULL;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	req = list_first_entry_or_null(&sba->reqs_free_list,
				       struct sba_request, node);
	if (req) {
		list_move_tail(&req->node, &sba->reqs_alloc_list);
		req->state = SBA_REQUEST_STATE_ALLOCED;
		req->fence = false;
		req->first = req;
		INIT_LIST_HEAD(&req->next);
		req->next_count = 1;
		atomic_set(&req->next_pending_count, 1);

		sba->reqs_free_count--;

		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
	}

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return req;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
				 struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->state = SBA_REQUEST_STATE_PENDING;
	list_move_tail(&req->node, &sba->reqs_pending_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
				struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	if (sba->reqs_fence)
		return false;
	req->state = SBA_REQUEST_STATE_ACTIVE;
	list_move_tail(&req->node, &sba->reqs_active_list);
	if (req->fence)
		sba->reqs_fence = true;
	return true;
}
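/*
 * Fencing note (added commentary): once a request with req->fence set
 * becomes active, sba->reqs_fence stays true and _sba_active_request()
 * refuses to activate further requests until the active list drains.
 * This is what gives DMA_PREP_FENCE its ordering guarantee on this
 * channel.
 */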
/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
			       struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->state = SBA_REQUEST_STATE_ABORTED;
	list_move_tail(&req->node, &sba->reqs_aborted_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
			      struct sba_request *req)
{
	lockdep_assert_held(&sba->reqs_lock);
	req->state = SBA_REQUEST_STATE_FREE;
	list_move_tail(&req->node, &sba->reqs_free_list);
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;
	sba->reqs_free_count++;
}

static void sba_received_request(struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);
	req->state = SBA_REQUEST_STATE_RECEIVED;
	list_move_tail(&req->node, &sba->reqs_received_list);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_complete_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	req->state = SBA_REQUEST_STATE_COMPLETED;
	list_move_tail(&req->node, &sba->reqs_completed_list);
	list_for_each_entry(nreq, &req->next, next) {
		nreq->state = SBA_REQUEST_STATE_COMPLETED;
		list_move_tail(&nreq->node, &sba->reqs_completed_list);
	}
	if (list_empty(&sba->reqs_active_list))
		sba->reqs_fence = false;

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_free_chained_requests(struct sba_request *req)
{
	unsigned long flags;
	struct sba_request *nreq;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	_sba_free_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_free_request(sba, nreq);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_chain_request(struct sba_request *first,
			      struct sba_request *req)
{
	unsigned long flags;
	struct sba_device *sba = req->sba;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	list_add_tail(&req->next, &first->next);
	req->first = first;
	first->next_count++;
	atomic_set(&first->next_pending_count, first->next_count);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
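/*
 * Chaining note (added commentary): a large prep request becomes a chain
 * of sub-requests hanging off first->next. first->next_pending_count
 * counts the first request plus every chained one; each mailbox rx
 * callback decrements it, and only the final decrement (see
 * sba_receive_message()) runs the async_tx completion actions for the
 * whole chain.
 */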
static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all allocated requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
		_sba_free_request(sba, req);

	/* Free up all received requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
		_sba_free_request(sba, req);

	/* Free up all completed requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
		_sba_free_request(sba, req);

	/* Set all active requests as aborted */
	list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
		_sba_abort_request(sba, req);

	/*
	 * Note: We expect that aborted requests will eventually be
	 * freed by sba_receive_message()
	 */

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_pending_requests(struct sba_device *sba)
{
	unsigned long flags;
	struct sba_request *req, *req1;

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Free up all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
		_sba_free_request(sba, req);

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

/* ====== DMAENGINE callbacks ===== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
	/*
	 * Channel resources are pre-allocated, so we just free up
	 * whatever we can so that we can reuse the pre-allocated
	 * channel resources next time.
	 */
	sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
	/* Cleanup all pending requests */
	sba_cleanup_pending_requests(to_sba_device(dchan));

	return 0;
}

static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
{
	int mchans_idx, ret = 0;

	/* Select mailbox channel in round-robin fashion */
	mchans_idx = atomic_inc_return(&sba->mchans_current);
	mchans_idx = mchans_idx % sba->mchans_count;

	/* Send message for the request */
	req->msg.error = 0;
	ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
	if (ret < 0) {
		dev_err(sba->dev, "send message failed with error %d", ret);
		return ret;
	}
	ret = req->msg.error;
	if (ret < 0) {
		dev_err(sba->dev, "message error %d", ret);
		return ret;
	}

	return 0;
}
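/*
 * Round-robin note (added commentary): mchans_current is a free-running
 * atomic counter, so consecutive requests map to mailbox channels
 * 0, 1, ..., mchans_count - 1, 0, ... regardless of which CPU submits
 * them. This spreads chained sub-requests across hardware rings, which
 * is how the driver realizes the parallelism described at the top of
 * this file.
 */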
static void sba_issue_pending(struct dma_chan *dchan)
{
	int ret;
	unsigned long flags;
	struct sba_request *req, *req1;
	struct sba_device *sba = to_sba_device(dchan);

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Process all pending requests */
	list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
		/* Try to make request active */
		if (!_sba_active_request(sba, req))
			break;

		/* Send request to mailbox channel */
		spin_unlock_irqrestore(&sba->reqs_lock, flags);
		ret = sba_send_mbox_request(sba, req);
		spin_lock_irqsave(&sba->reqs_lock, flags);

		/* If something went wrong then keep request pending */
		if (ret < 0) {
			_sba_pending_request(sba, req);
			break;
		}
	}

	spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	dma_cookie_t cookie;
	struct sba_device *sba;
	struct sba_request *req, *nreq;

	if (unlikely(!tx))
		return -EINVAL;

	sba = to_sba_device(tx->chan);
	req = to_sba_request(tx);

	/* Assign cookie and mark all chained requests pending */
	spin_lock_irqsave(&sba->reqs_lock, flags);
	cookie = dma_cookie_assign(tx);
	_sba_pending_request(sba, req);
	list_for_each_entry(nreq, &req->next, next)
		_sba_pending_request(sba, nreq);
	spin_unlock_irqrestore(&sba->reqs_lock, flags);

	return cookie;
}

static enum dma_status sba_tx_status(struct dma_chan *dchan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	int mchan_idx;
	enum dma_status ret;
	struct sba_device *sba = to_sba_device(dchan);

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	/* Poll mailbox channels for completions before re-checking */
	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
		mbox_client_peek_data(sba->mchans[mchan_idx]);

	return dma_cookie_status(dchan, cookie, txstate);
}

static void sba_fillup_interrupt_msg(struct sba_request *req,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg)
{
	u64 cmd;
	u32 c_mdata;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load dummy data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = req->resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Type-A command to write buf0 to dummy location */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = req->resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = req->resp_dma;
	cmdsp->data_len = req->sba->hw_resp_size;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
	struct sba_request *req = NULL;
	struct sba_device *sba = to_sba_device(dchan);

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;

	/*
	 * Force fence so that no requests are submitted
	 * until DMA callback for this request is invoked.
	 */
	req->fence = true;

	/* Fill up request message */
	sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return &req->tx;
}

static void sba_fillup_memcpy_msg(struct sba_request *req,
				  struct brcm_sba_command *cmds,
				  struct brcm_message *msg,
				  dma_addr_t msg_offset, size_t msg_len,
				  dma_addr_t dst, dma_addr_t src)
{
	u64 cmd;
	u32 c_mdata;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = req->resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_memcpy_req(struct sba_device *sba,
			dma_addr_t off, dma_addr_t dst, dma_addr_t src,
			size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	req->fence = (flags & DMA_PREP_FENCE) ? true : false;

	/* Fill up request message */
	sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
			      off, len, dst, src);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_memcpy_req(sba, off, dst, src,
					      req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
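/*
 * Worked example (added commentary): with hw_buf_size = 4096, a 10000
 * byte memcpy becomes three chained sub-requests covering offsets
 * 0..4095, 4096..8191 and 8192..9999. Each sub-request is a two-command
 * SBA message (LOAD_BUFFER from src, WRITE_BUFFER to dst), and the
 * sub-requests fan out over the mailbox channels in round-robin order.
 */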
static void sba_fillup_xor_msg(struct sba_request *req,
			       struct brcm_sba_command *cmds,
			       struct brcm_message *msg,
			       dma_addr_t msg_offset, size_t msg_len,
			       dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	struct brcm_sba_command *cmdsp = cmds;

	/* Type-B command to load data into buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_load_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src[0] + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Type-B commands to xor data with buf0 and put it back in buf0 */
	for (i = 1; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = req->resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = dst + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
		     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
		     u32 src_cnt, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	req->fence = (flags & DMA_PREP_FENCE) ? true : false;

	/* Fill up request message */
	sba_fillup_xor_msg(req, req->cmds, &req->msg,
			   off, len, dst, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
		 u32 src_cnt, size_t len, unsigned long flags)
{
	size_t req_len;
	dma_addr_t off = 0;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_xor_srcs))
		return NULL;

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
					   req_len, flags);
		if (!req) {
			if (first)
				sba_free_chained_requests(first);
			return NULL;
		}

		if (first)
			sba_chain_request(first, req);
		else
			first = req;

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;
}
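/*
 * Sizing note (added commentary): an N-source XOR slice needs N + 1 SBA
 * commands (one LOAD_BUFFER, N - 1 XORs, one WRITE_BUFFER), which is why
 * sba_probe() derives max_xor_srcs as max_cmd_per_req - 1.
 */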
static void sba_fillup_pq_msg(struct sba_request *req,
			      bool pq_continue,
			      struct brcm_sba_command *cmds,
			      struct brcm_message *msg,
			      dma_addr_t msg_offset, size_t msg_len,
			      dma_addr_t *dst_p, dma_addr_t *dst_q,
			      const u8 *scf, dma_addr_t *src, u32 src_cnt)
{
	u64 cmd;
	u32 c_mdata;
	unsigned int i;
	struct brcm_sba_command *cmdsp = cmds;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		if (dst_p) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(0);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_p + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}

		/* Type-B command to load old Q into buf1 */
		if (dst_q) {
			cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
					  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
			cmd = sba_cmd_enc(cmd, msg_len,
					  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
			c_mdata = sba_cmd_load_c_mdata(1);
			cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
					  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
			cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
					  SBA_CMD_SHIFT, SBA_CMD_MASK);
			cmdsp->cmd = cmd;
			*cmdsp->cmd_dma = cpu_to_le64(cmd);
			cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
			cmdsp->data = *dst_q + msg_offset;
			cmdsp->data_len = msg_len;
			cmdsp++;
		}
	} else {
		/* Type-A command to zero all buffers */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;
	}

	/* Type-B commands to generate P onto buf0 and Q onto buf1 */
	for (i = 0; i < src_cnt; i++) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src[i] + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	if (dst_p) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = req->resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf1 */
	if (dst_q) {
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		cmd = sba_cmd_enc(cmd, 0x1,
				  SBA_RESP_SHIFT, SBA_RESP_MASK);
		c_mdata = sba_cmd_write_c_mdata(1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		if (req->sba->hw_resp_size) {
			cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
			cmdsp->resp = req->resp_dma;
			cmdsp->resp_len = req->sba->hw_resp_size;
		}
		cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
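/*
 * Galois-field note (added commentary): the Q coefficient scf[i] is
 * passed to the hardware as its discrete logarithm, raid6_gflog[scf[i]],
 * so a GALOIS_XOR command computes src * g^log(scf[i]) = src * scf[i]
 * in GF(2^8). Coefficients whose log exceeds max_pq_coefs cannot be
 * expressed in one command; sba_prep_dma_pq() detects this and falls
 * back to the slower single-source path below.
 */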
static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
		    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
		    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	req->fence = (flags & DMA_PREP_FENCE) ? true : false;

	/* Fill up request messages */
	sba_fillup_pq_msg(req, dmaf_continue(flags),
			  req->cmds, &req->msg,
			  off, len, dst_p, dst_q, scf, src, src_cnt);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static void sba_fillup_pq_single_msg(struct sba_request *req,
				     bool pq_continue,
				     struct brcm_sba_command *cmds,
				     struct brcm_message *msg,
				     dma_addr_t msg_offset, size_t msg_len,
				     dma_addr_t *dst_p, dma_addr_t *dst_q,
				     dma_addr_t src, u8 scf)
{
	u64 cmd;
	u32 c_mdata;
	u8 pos, dpos = raid6_gflog[scf];
	struct brcm_sba_command *cmdsp = cmds;

	if (!dst_p)
		goto skip_p;

	if (pq_continue) {
		/* Type-B command to load old P into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_p + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;

		/*
		 * Type-B commands to xor data with buf0 and put it
		 * back in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	} else {
		/* Type-B command to load data into buf0 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_load_c_mdata(0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = src + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = req->resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_p + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_p:
	if (!dst_q)
		goto skip_q;

	/* Type-A command to zero all buffers */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	cmdsp++;

	if (dpos == 255)
		goto skip_q_computation;
	pos = (dpos < req->sba->max_pq_coefs) ?
		dpos : (req->sba->max_pq_coefs - 1);

	/*
	 * Type-B command to generate initial Q from data
	 * and store output into buf0
	 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
			  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
	cmdsp->data = src + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

	dpos -= pos;

	/* Multiple Type-A commands to generate final Q */
	while (dpos) {
		pos = (dpos < req->sba->max_pq_coefs) ?
			dpos : (req->sba->max_pq_coefs - 1);

		/*
		 * Type-A command to generate Q with buf0 and
		 * buf1, store result in buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
				  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
		cmdsp++;

		dpos -= pos;
	}

skip_q_computation:
	if (pq_continue) {
		/*
		 * Type-B command to XOR previous output with
		 * buf0 and write it into buf0
		 */
		cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
				  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
		cmd = sba_cmd_enc(cmd, msg_len,
				  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
		c_mdata = sba_cmd_xor_c_mdata(0, 0);
		cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
				  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
		cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
				  SBA_CMD_SHIFT, SBA_CMD_MASK);
		cmdsp->cmd = cmd;
		*cmdsp->cmd_dma = cpu_to_le64(cmd);
		cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
		cmdsp->data = *dst_q + msg_offset;
		cmdsp->data_len = msg_len;
		cmdsp++;
	}

	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = req->resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_q + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_q:
	/* Fill up brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}
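/*
 * Worked example (added commentary): computing Q = g^dpos * src when
 * dpos exceeds what one command can encode. With max_pq_coefs = 30 and
 * dpos = 100, the initial GALOIS command applies g^29, and the loop
 * above then applies g^29, g^29 and g^13 to buf0, since
 * 29 + 29 + 29 + 13 = 100 and exponents add when GF(2^8) powers are
 * multiplied.
 */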
static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
			   dma_addr_t *dst_p, dma_addr_t *dst_q,
			   dma_addr_t src, u8 scf, size_t len,
			   unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	req->fence = (flags & DMA_PREP_FENCE) ? true : false;

	/* Fill up request messages */
	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
				 req->cmds, &req->msg, off, len,
				 dst_p, dst_q, src, scf);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}
static struct dma_async_tx_descriptor *
sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	u32 i, dst_q_index;
	size_t req_len;
	bool slow = false;
	dma_addr_t off = 0;
	dma_addr_t *dst_p = NULL, *dst_q = NULL;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_pq_srcs))
		return NULL;
	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			slow = true;

	/* Figure out P and Q destination addresses */
	if (!(flags & DMA_PREP_PQ_DISABLE_P))
		dst_p = &dst[0];
	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
		dst_q = &dst[1];

	/* Create chained requests where each request is up to hw_buf_size */
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		if (slow) {
			dst_q_index = src_cnt;

			if (dst_q) {
				for (i = 0; i < src_cnt; i++) {
					if (*dst_q == src[i]) {
						dst_q_index = i;
						break;
					}
				}
			}

			if (dst_q_index < src_cnt) {
				i = dst_q_index;
				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}

			for (i = 0; i < src_cnt; i++) {
				if (dst_q_index == i)
					continue;

				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}
		} else {
			req = sba_prep_dma_pq_req(sba, off,
						  dst_p, dst_q, src, src_cnt,
						  scf, req_len, flags);
			if (!req)
				goto fail;

			if (first)
				sba_chain_request(first, req);
			else
				first = req;
		}

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;

fail:
	if (first)
		sba_free_chained_requests(first);
	return NULL;
}
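/*
 * Slow-path note (added commentary): SBA_VER_1 supports coefficient
 * logs only up to max_pq_coefs = 6, so e.g. scf[i] = 128
 * (raid6_gflog[128] = 7) cannot be encoded in a single multi-source
 * GALOIS_XOR message. In that case every source gets its own fenced
 * single-source sub-request via sba_prep_dma_pq_single_req(), with the
 * destination-overlapping source (if any) processed first.
 */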
/* ====== Mailbox callbacks ===== */

static void sba_dma_tx_actions(struct sba_request *req)
{
	struct dma_async_tx_descriptor *tx = &req->tx;

	WARN_ON(tx->cookie < 0);

	if (tx->cookie > 0) {
		dma_cookie_complete(tx);

		/*
		 * Call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (tx->callback)
			tx->callback(tx->callback_param);

		dma_descriptor_unmap(tx);
	}

	/* Run dependent operations */
	dma_run_dependencies(tx);

	/* If waiting for 'ack' then move to completed list */
	if (!async_tx_test_ack(&req->tx))
		sba_complete_chained_requests(req);
	else
		sba_free_chained_requests(req);
}

static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	unsigned long flags;
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx, *req1;
	struct sba_device *sba = req->sba;

	/* Report an error if the message has one */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Mark request as received */
	sba_received_request(req);

	/* Wait for all chained requests to be completed */
	if (atomic_dec_return(&req->first->next_pending_count))
		goto done;

	/* Point to first request */
	req = req->first;

	/* Update request */
	if (req->state == SBA_REQUEST_STATE_RECEIVED)
		sba_dma_tx_actions(req);
	else
		sba_free_chained_requests(req);

	spin_lock_irqsave(&sba->reqs_lock, flags);

	/* Re-check all completed requests waiting for 'ack' */
	list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
		spin_unlock_irqrestore(&sba->reqs_lock, flags);
		sba_dma_tx_actions(req);
		spin_lock_irqsave(&sba->reqs_lock, flags);
	}

	spin_unlock_irqrestore(&sba->reqs_lock, flags);

done:
	/* Try to submit pending requests */
	sba_issue_pending(&sba->dma_chan);
}

/* ====== Platform driver routines ===== */

static int sba_prealloc_channel_resources(struct sba_device *sba)
{
	int i, j, p, ret = 0;
	struct sba_request *req = NULL;

	sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
					    sba->max_resp_pool_size,
					    &sba->resp_dma_base, GFP_KERNEL);
	if (!sba->resp_base)
		return -ENOMEM;

	sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
					    sba->max_cmds_pool_size,
					    &sba->cmds_dma_base, GFP_KERNEL);
	if (!sba->cmds_base) {
		ret = -ENOMEM;
		goto fail_free_resp_pool;
	}

	spin_lock_init(&sba->reqs_lock);
	sba->reqs_fence = false;
	INIT_LIST_HEAD(&sba->reqs_alloc_list);
	INIT_LIST_HEAD(&sba->reqs_pending_list);
	INIT_LIST_HEAD(&sba->reqs_active_list);
	INIT_LIST_HEAD(&sba->reqs_received_list);
	INIT_LIST_HEAD(&sba->reqs_completed_list);
	INIT_LIST_HEAD(&sba->reqs_aborted_list);
	INIT_LIST_HEAD(&sba->reqs_free_list);

	sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
				 sizeof(*req), GFP_KERNEL);
	if (!sba->reqs) {
		ret = -ENOMEM;
		goto fail_free_cmds_pool;
	}

	for (i = 0, p = 0; i < sba->max_req; i++) {
		req = &sba->reqs[i];
		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->state = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		req->next_count = 1;
		atomic_set(&req->next_pending_count, 0);
		req->fence = false;
		req->resp = sba->resp_base + p;
		req->resp_dma = sba->resp_dma_base + p;
		p += sba->hw_resp_size;
		req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
					 sizeof(*req->cmds), GFP_KERNEL);
		if (!req->cmds) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;
		}
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}
		memset(&req->msg, 0, sizeof(req->msg));
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		req->tx.tx_submit = sba_tx_submit;
		req->tx.phys = req->resp_dma;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}

	sba->reqs_free_count = sba->max_req;

	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->dma_dev.dev,
			  sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->dma_dev.dev,
			  sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}
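/*
 * Pool layout note (added commentary): the response pool gives each of
 * the max_req requests a hw_resp_size slot, and the command pool gives
 * each request max_cmd_per_req 64-bit command words, so
 * req->cmds[j].cmd_dma above is simply a fixed slice of one big
 * coherent allocation. For SBA_VER_2 that works out to 1024 * 8 bytes
 * of responses and 1024 * 15 * 8 bytes of commands.
 */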
static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by mailbox controller
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}
static int sba_probe(struct platform_device *pdev)
{
	int i, ret = 0, mchans_count;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->max_req = 1024;
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->max_req = 1024;
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We cannot support max_pq_srcs == max_pq_coefs because
		 * we are limited by the number of SBA commands that we
		 * can fit in one message for the underlying ring
		 * manager HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = false;
	sba->client.tx_tout = 0;

	/* Number of channels equals number of mailbox channels */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;
	mchans_count = ret;
	sba->mchans_count = 0;
	atomic_set(&sba->mchans_current, 0);

	/*
	 * Allocate mailbox channel array; note that it must be sized by
	 * mchans_count (the phandle count) since sba->mchans_count is
	 * still zero at this point.
	 */
	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
				   sizeof(*sba->mchans), GFP_KERNEL);
	if (!sba->mchans)
		return -ENOMEM;

	/* Request mailbox channels */
	for (i = 0; i < mchans_count; i++) {
		sba->mchans[i] = mbox_request_channel(&sba->client, i);
		if (IS_ERR(sba->mchans[i])) {
			ret = PTR_ERR(sba->mchans[i]);
			goto fail_free_mchans;
		}
		sba->mchans_count++;
	}

	/* Find out underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchans;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchans;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* All mailbox channels should belong to the same ring manager device */
	for (i = 1; i < mchans_count; i++) {
		ret = of_parse_phandle_with_args(pdev->dev.of_node,
						 "mboxes", "#mbox-cells",
						 i, &args);
		if (ret)
			goto fail_free_mchans;
		mbox_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (sba->mbox_dev != &mbox_pdev->dev) {
			ret = -EINVAL;
			goto fail_free_mchans;
		}
	}

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_mchans;

	/* Prealloc channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_async_dev_unreg;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 sba->mchans_count);

	return 0;

fail_async_dev_unreg:
	dma_async_device_unregister(&sba->dma_dev);
fail_free_mchans:
	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);
	return ret;
}
static int sba_remove(struct platform_device *pdev)
{
	int i;
	struct sba_device *sba = platform_get_drvdata(pdev);

	sba_freeup_channel_resources(sba);

	dma_async_device_unregister(&sba->dma_dev);

	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);

	return 0;
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);
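/*
 * Illustrative (hypothetical) device tree node for this driver; the
 * node label, unit address and mailbox specifier cells below are made
 * up and depend on the ring manager binding:
 *
 *	raid_dma: sba@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&ringmgr 0>, <&ringmgr 1>;
 *	};
 *
 * All mboxes entries must point at the same ring manager device, as
 * enforced in sba_probe().
 */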
"bcm-sba-raid", 1778 .of_match_table = sba_of_match, 1779 }, 1780 }; 1781 module_platform_driver(sba_driver); 1782 1783 MODULE_DESCRIPTION("Broadcom SBA RAID driver"); 1784 MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>"); 1785 MODULE_LICENSE("GPL v2"); 1786