/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/cpumask.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

enum mv_xor_type {
	XOR_ORION,
	XOR_ARMADA_38X,
	XOR_ARMADA_37XX,
};

enum mv_xor_mode {
	XOR_MODE_IN_REG,
	XOR_MODE_IN_DESC,
};

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
		XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_mode(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	switch (desc->type) {
	case DMA_XOR:
	case DMA_INTERRUPT:
		hw_desc->desc_command |= XOR_DESC_OPERATION_XOR;
		break;
	case DMA_MEMCPY:
		hw_desc->desc_command |= XOR_DESC_OPERATION_MEMCPY;
		break;
	default:
		BUG();
		return;
	}
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_chan_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_chan_set_mode(struct mv_xor_chan *chan,
			     u32 op_mode)
{
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/*
 * mv_chan_start_new_chain - program the engine to operate on new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_chan_start_new_chain(struct mv_xor_chan *mv_chan,
				    struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_desc_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
				struct mv_xor_chan *mv_chan,
				dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		dma_descriptor_unmap(&desc->async_tx);
		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_chan_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_move_tail(&iter->node, &mv_chan->free_slots);
			if (!list_empty(&iter->sg_tx_list)) {
				list_splice_tail_init(&iter->sg_tx_list,
						      &mv_chan->free_slots);
			}
		}
	}
	return 0;
}

static int
mv_desc_clean_slot(struct mv_xor_desc_slot *desc,
		   struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);

	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_move_tail(&desc->node, &mv_chan->completed_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->completed_slots);
		}
	} else {
		list_move_tail(&desc->node, &mv_chan->free_slots);
		if (!list_empty(&desc->sg_tx_list)) {
			list_splice_tail_init(&desc->sg_tx_list,
					      &mv_chan->free_slots);
		}
	}

	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_chan_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_chan_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_desc_run_tx_complete_actions(iter, mv_chan,
								 cookie);

			/* done processing desc, clean slot */
			mv_desc_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  node);
			mv_chan_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->node.next,
						  struct mv_xor_desc_slot,
						  node);
				mv_chan_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock(&chan->lock);
	mv_chan_slot_cleanup(chan);
	spin_unlock(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_chan_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter;

	spin_lock_bh(&mv_chan->lock);

	if (!list_empty(&mv_chan->free_slots)) {
		iter = list_first_entry(&mv_chan->free_slots,
					struct mv_xor_desc_slot,
					node);

		list_move_tail(&iter->node, &mv_chan->allocated_slots);

		spin_unlock_bh(&mv_chan->lock);

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);
		iter->async_tx.cookie = -EBUSY;

		return iter;
	}

	spin_unlock_bh(&mv_chan->lock);

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_move_tail(&sw_desc->node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    node);
		list_move_tail(&sw_desc->node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_chan_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->node);
		INIT_LIST_HEAD(&slot->sg_tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->node, &mv_chan->free_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots\n",
		mv_chan->slots_allocated);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

/*
 * Check if the source or destination is a PCIe/IO address (non-SDRAM) and
 * add a new MBus window if necessary. Cache these checks so that the
 * MMIO-mapped registers don't have to be accessed every time, which speeds
 * up this process.
 */
static int mv_xor_add_io_win(struct mv_xor_chan *mv_chan, u32 addr)
{
	struct mv_xor_device *xordev = mv_chan->xordev;
	void __iomem *base = mv_chan->mmr_high_base;
	u32 win_enable;
	u32 size;
	u8 target, attr;
	int ret;
	int i;

	/* Nothing needs to get done for the Armada 3700 */
	if (xordev->xor_type == XOR_ARMADA_37XX)
		return 0;

	/*
	 * Loop over the cached windows to check if the requested area
	 * is already mapped. If this is the case, nothing needs to be
	 * done and we can return.
	 */
	for (i = 0; i < WINDOW_COUNT; i++) {
		if (addr >= xordev->win_start[i] &&
		    addr <= xordev->win_end[i]) {
			/* Window is already mapped */
			return 0;
		}
	}

	/*
	 * The window is not mapped, so we need to create the new mapping
	 */

	/* If no IO window is found, then addr has to be located in SDRAM */
	ret = mvebu_mbus_get_io_win_info(addr, &size, &target, &attr);
	if (ret < 0)
		return 0;

	/*
	 * Mask the base addr 'addr' according to 'size' read back from the
	 * MBus window. Otherwise we might end up with an address located
	 * somewhere in the middle of this area.
	 */
	size -= 1;
	addr &= ~size;

	/*
	 * Reading one of the two enable registers is enough, as they are
	 * always programmed to identical values.
	 */
	win_enable = readl(base + WINDOW_BAR_ENABLE(0));

	/* Set 'i' to the first free window to write the new values to */
	i = ffs(~win_enable) - 1;
	if (i >= WINDOW_COUNT)
		return -ENOMEM;

	writel((addr & 0xffff0000) | (attr << 8) | target,
	       base + WINDOW_BASE(i));
	writel(size & 0xffff0000, base + WINDOW_SIZE(i));

	/* Fill the caching variables for later use */
	xordev->win_start[i] = addr;
	xordev->win_end[i] = addr + size;

	win_enable |= (1 << i);
	win_enable |= 3 << (16 + (2 * i));
	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));

	return 0;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;
	int ret;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	/* Check if a new window needs to get added for 'dest' */
	ret = mv_xor_add_io_win(mv_chan, dest);
	if (ret)
		return NULL;

	sw_desc = mv_chan_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
			mv_desc_set_mode(sw_desc);
		while (src_cnt--) {
			/* Check if a new window needs to get added for 'src' */
			ret = mv_xor_add_io_win(mv_chan, src[src_cnt]);
			if (ret)
				return NULL;
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
		}
	}

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_chan_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->allocated_slots,
				 node) {
		in_use_descs++;
		list_move_tail(&iter->node, &mv_chan->free_slots);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->free_slots, node) {
		list_del(&iter->node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_chan_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_chan_dump_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_chan_err_interrupt_handler(struct mv_xor_chan *chan,
					  u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_chan_dump_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_chan_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_chan_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
			       offset_in_page(src), PAGE_SIZE,
			       DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
				offset_in_page(dest), PAGE_SIZE,
				DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_chan_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;
	if (xordev->xor_type == XOR_ORION)
		mv_chan->op_in_desc = XOR_MODE_IN_REG;
	else
		mv_chan->op_in_desc = XOR_MODE_IN_DESC;

	dma_dev = &mv_chan->dmadev;
	dma_dev->dev = &pdev->dev;
	mv_chan->xordev = xordev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_wc(&pdev->dev, MV_XOR_POOL_SIZE, &mv_chan->dma_desc_pool,
		       GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_chan_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	if (mv_chan->op_in_desc == XOR_MODE_IN_DESC)
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_IN_DESC);
	else
		mv_chan_set_mode(mv_chan, XOR_OPERATION_MODE_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->free_slots);
	INIT_LIST_HEAD(&mv_chan->allocated_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_chan_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_chan_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR (%s): ( %s%s%s)\n",
		 mv_chan->op_in_desc ? "Descriptor Mode" : "Registers Mode",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
"intr " : ""); 1155 1156 ret = dma_async_device_register(dma_dev); 1157 if (ret) 1158 goto err_free_irq; 1159 1160 return mv_chan; 1161 1162 err_free_irq: 1163 free_irq(mv_chan->irq, mv_chan); 1164 err_free_dma: 1165 dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE, 1166 mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool); 1167 return ERR_PTR(ret); 1168 } 1169 1170 static void 1171 mv_xor_conf_mbus_windows(struct mv_xor_device *xordev, 1172 const struct mbus_dram_target_info *dram) 1173 { 1174 void __iomem *base = xordev->xor_high_base; 1175 u32 win_enable = 0; 1176 int i; 1177 1178 for (i = 0; i < 8; i++) { 1179 writel(0, base + WINDOW_BASE(i)); 1180 writel(0, base + WINDOW_SIZE(i)); 1181 if (i < 4) 1182 writel(0, base + WINDOW_REMAP_HIGH(i)); 1183 } 1184 1185 for (i = 0; i < dram->num_cs; i++) { 1186 const struct mbus_dram_window *cs = dram->cs + i; 1187 1188 writel((cs->base & 0xffff0000) | 1189 (cs->mbus_attr << 8) | 1190 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 1191 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 1192 1193 /* Fill the caching variables for later use */ 1194 xordev->win_start[i] = cs->base; 1195 xordev->win_end[i] = cs->base + cs->size - 1; 1196 1197 win_enable |= (1 << i); 1198 win_enable |= 3 << (16 + (2 * i)); 1199 } 1200 1201 writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1202 writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1203 writel(0, base + WINDOW_OVERRIDE_CTRL(0)); 1204 writel(0, base + WINDOW_OVERRIDE_CTRL(1)); 1205 } 1206 1207 static void 1208 mv_xor_conf_mbus_windows_a3700(struct mv_xor_device *xordev) 1209 { 1210 void __iomem *base = xordev->xor_high_base; 1211 u32 win_enable = 0; 1212 int i; 1213 1214 for (i = 0; i < 8; i++) { 1215 writel(0, base + WINDOW_BASE(i)); 1216 writel(0, base + WINDOW_SIZE(i)); 1217 if (i < 4) 1218 writel(0, base + WINDOW_REMAP_HIGH(i)); 1219 } 1220 /* 1221 * For Armada3700 open default 4GB Mbus window. The dram 1222 * related configuration are done at AXIS level. 1223 */ 1224 writel(0xffff0000, base + WINDOW_SIZE(0)); 1225 win_enable |= 1; 1226 win_enable |= 3 << 16; 1227 1228 writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1229 writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1230 writel(0, base + WINDOW_OVERRIDE_CTRL(0)); 1231 writel(0, base + WINDOW_OVERRIDE_CTRL(1)); 1232 } 1233 1234 /* 1235 * Since this XOR driver is basically used only for RAID5, we don't 1236 * need to care about synchronizing ->suspend with DMA activity, 1237 * because the DMA engine will naturally be quiet due to the block 1238 * devices being suspended. 
 */
static int mv_xor_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		mv_chan->saved_config_reg =
			readl_relaxed(XOR_CONFIG(mv_chan));
		mv_chan->saved_int_mask_reg =
			readl_relaxed(XOR_INTR_MASK(mv_chan));
	}

	return 0;
}

static int mv_xor_resume(struct platform_device *dev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(dev);
	const struct mbus_dram_target_info *dram;
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		struct mv_xor_chan *mv_chan = xordev->channels[i];

		if (!mv_chan)
			continue;

		writel_relaxed(mv_chan->saved_config_reg,
			       XOR_CONFIG(mv_chan));
		writel_relaxed(mv_chan->saved_int_mask_reg,
			       XOR_INTR_MASK(mv_chan));
	}

	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
		return 0;
	}

	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	return 0;
}

static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", .data = (void *)XOR_ORION },
	{ .compatible = "marvell,armada-380-xor", .data = (void *)XOR_ARMADA_38X },
	{ .compatible = "marvell,armada-3700-xor", .data = (void *)XOR_ARMADA_37XX },
	{},
};

static unsigned int mv_xor_engine_count;

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	unsigned int max_engines, max_channels;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * We need to know which type of XOR device we use before
	 * setting up. In the non-DT case it can only be the legacy one.
	 */
	xordev->xor_type = XOR_ORION;
	if (pdev->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(mv_xor_dt_ids,
					&pdev->dev);

		xordev->xor_type = (uintptr_t)of_id->data;
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (xordev->xor_type == XOR_ARMADA_37XX) {
		mv_xor_conf_mbus_windows_a3700(xordev);
	} else {
		dram = mv_mbus_dram_info();
		if (dram)
			mv_xor_conf_mbus_windows(xordev, dram);
	}

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	/*
	 * We don't want to have more than one channel per CPU in
	 * order for async_tx to perform well. So we limit the number
	 * of engines and channels so that we take into account this
	 * constraint. Note that we also want to use channels from
	 * separate engines when possible. On the dual-CPU Armada 3700
	 * SoC with a single XOR engine, allow using both of its channels.
	 */
	max_engines = num_present_cpus();
	if (xordev->xor_type == XOR_ARMADA_37XX)
		max_channels = num_present_cpus();
	else
		max_channels = min_t(unsigned int,
				     MV_XOR_MAX_CHANNELS,
				     DIV_ROUND_UP(num_present_cpus(), 2));

	if (mv_xor_engine_count >= max_engines)
		return 0;

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			if (i >= max_channels)
				continue;

			dma_cap_zero(cap_mask);
			dma_cap_set(DMA_MEMCPY, cap_mask);
			dma_cap_set(DMA_XOR, cap_mask);
			dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < max_channels; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.suspend	= mv_xor_suspend,
	.resume		= mv_xor_resume,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};

builtin_platform_driver(mv_xor_driver);

/*
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");
*/