/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @mv_chan: XOR channel the slot belongs to
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev = mv_chan_to_devp(mv_chan);
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
							  struct mv_xor_desc_slot,
							  slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
			       " %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: dest %x %u flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p \n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
		dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				       &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = pdev->dev.platform_data;
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,memset"))
				dma_cap_set(DMA_MEMSET, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				xordev->channels[i] = NULL;
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			xordev->channels[i] =
				mv_xor_channel_add(xordev, pdev, i,
						   cd->cap_mask, irq);
			if (IS_ERR(xordev->channels[i])) {
				ret = PTR_ERR(xordev->channels[i]);
				goto err_channel_add;
			}
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");