1 /* 2 * offload engine driver for the Marvell XOR engine 3 * Copyright (C) 2007, 2008, Marvell International Ltd. 4 * 5 * This program is free software; you can redistribute it and/or modify it 6 * under the terms and conditions of the GNU General Public License, 7 * version 2, as published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 * more details. 13 * 14 * You should have received a copy of the GNU General Public License along with 15 * this program; if not, write to the Free Software Foundation, Inc., 16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 */ 18 19 #include <linux/init.h> 20 #include <linux/module.h> 21 #include <linux/slab.h> 22 #include <linux/delay.h> 23 #include <linux/dma-mapping.h> 24 #include <linux/spinlock.h> 25 #include <linux/interrupt.h> 26 #include <linux/platform_device.h> 27 #include <linux/memory.h> 28 #include <linux/clk.h> 29 #include <linux/platform_data/dma-mv_xor.h> 30 31 #include "dmaengine.h" 32 #include "mv_xor.h" 33 34 static void mv_xor_issue_pending(struct dma_chan *chan); 35 36 #define to_mv_xor_chan(chan) \ 37 container_of(chan, struct mv_xor_chan, dmachan) 38 39 #define to_mv_xor_slot(tx) \ 40 container_of(tx, struct mv_xor_desc_slot, async_tx) 41 42 #define mv_chan_to_devp(chan) \ 43 ((chan)->device->common.dev) 44 45 static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags) 46 { 47 struct mv_xor_desc *hw_desc = desc->hw_desc; 48 49 hw_desc->status = (1 << 31); 50 hw_desc->phy_next_desc = 0; 51 hw_desc->desc_command = (1 << 31); 52 } 53 54 static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc) 55 { 56 struct mv_xor_desc *hw_desc = desc->hw_desc; 57 return hw_desc->phy_dest_addr; 58 } 59 60 static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc, 61 
int src_idx) 62 { 63 struct mv_xor_desc *hw_desc = desc->hw_desc; 64 return hw_desc->phy_src_addr[src_idx]; 65 } 66 67 68 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc, 69 u32 byte_count) 70 { 71 struct mv_xor_desc *hw_desc = desc->hw_desc; 72 hw_desc->byte_count = byte_count; 73 } 74 75 static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc, 76 u32 next_desc_addr) 77 { 78 struct mv_xor_desc *hw_desc = desc->hw_desc; 79 BUG_ON(hw_desc->phy_next_desc); 80 hw_desc->phy_next_desc = next_desc_addr; 81 } 82 83 static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc) 84 { 85 struct mv_xor_desc *hw_desc = desc->hw_desc; 86 hw_desc->phy_next_desc = 0; 87 } 88 89 static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val) 90 { 91 desc->value = val; 92 } 93 94 static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc, 95 dma_addr_t addr) 96 { 97 struct mv_xor_desc *hw_desc = desc->hw_desc; 98 hw_desc->phy_dest_addr = addr; 99 } 100 101 static int mv_chan_memset_slot_count(size_t len) 102 { 103 return 1; 104 } 105 106 #define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c) 107 108 static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc, 109 int index, dma_addr_t addr) 110 { 111 struct mv_xor_desc *hw_desc = desc->hw_desc; 112 hw_desc->phy_src_addr[index] = addr; 113 if (desc->type == DMA_XOR) 114 hw_desc->desc_command |= (1 << index); 115 } 116 117 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan) 118 { 119 return __raw_readl(XOR_CURR_DESC(chan)); 120 } 121 122 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan, 123 u32 next_desc_addr) 124 { 125 __raw_writel(next_desc_addr, XOR_NEXT_DESC(chan)); 126 } 127 128 static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr) 129 { 130 __raw_writel(desc_addr, XOR_DEST_POINTER(chan)); 131 } 132 133 static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size) 134 { 135 __raw_writel(block_size, 
XOR_BLOCK_SIZE(chan)); 136 } 137 138 static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value) 139 { 140 __raw_writel(value, XOR_INIT_VALUE_LOW(chan)); 141 __raw_writel(value, XOR_INIT_VALUE_HIGH(chan)); 142 } 143 144 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan) 145 { 146 u32 val = __raw_readl(XOR_INTR_MASK(chan)); 147 val |= XOR_INTR_MASK_VALUE << (chan->idx * 16); 148 __raw_writel(val, XOR_INTR_MASK(chan)); 149 } 150 151 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan) 152 { 153 u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan)); 154 intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF; 155 return intr_cause; 156 } 157 158 static int mv_is_err_intr(u32 intr_cause) 159 { 160 if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9))) 161 return 1; 162 163 return 0; 164 } 165 166 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 167 { 168 u32 val = ~(1 << (chan->idx * 16)); 169 dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val); 170 __raw_writel(val, XOR_INTR_CAUSE(chan)); 171 } 172 173 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan) 174 { 175 u32 val = 0xFFFF0000 >> (chan->idx * 16); 176 __raw_writel(val, XOR_INTR_CAUSE(chan)); 177 } 178 179 static int mv_can_chain(struct mv_xor_desc_slot *desc) 180 { 181 struct mv_xor_desc_slot *chain_old_tail = list_entry( 182 desc->chain_node.prev, struct mv_xor_desc_slot, chain_node); 183 184 if (chain_old_tail->type != desc->type) 185 return 0; 186 if (desc->type == DMA_MEMSET) 187 return 0; 188 189 return 1; 190 } 191 192 static void mv_set_mode(struct mv_xor_chan *chan, 193 enum dma_transaction_type type) 194 { 195 u32 op_mode; 196 u32 config = __raw_readl(XOR_CONFIG(chan)); 197 198 switch (type) { 199 case DMA_XOR: 200 op_mode = XOR_OPERATION_MODE_XOR; 201 break; 202 case DMA_MEMCPY: 203 op_mode = XOR_OPERATION_MODE_MEMCPY; 204 break; 205 case DMA_MEMSET: 206 op_mode = XOR_OPERATION_MODE_MEMSET; 207 break; 208 default: 209 
dev_err(mv_chan_to_devp(chan), 210 "error: unsupported operation %d.\n", 211 type); 212 BUG(); 213 return; 214 } 215 216 config &= ~0x7; 217 config |= op_mode; 218 __raw_writel(config, XOR_CONFIG(chan)); 219 chan->current_type = type; 220 } 221 222 static void mv_chan_activate(struct mv_xor_chan *chan) 223 { 224 u32 activation; 225 226 dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); 227 activation = __raw_readl(XOR_ACTIVATION(chan)); 228 activation |= 0x1; 229 __raw_writel(activation, XOR_ACTIVATION(chan)); 230 } 231 232 static char mv_chan_is_busy(struct mv_xor_chan *chan) 233 { 234 u32 state = __raw_readl(XOR_ACTIVATION(chan)); 235 236 state = (state >> 4) & 0x3; 237 238 return (state == 1) ? 1 : 0; 239 } 240 241 static int mv_chan_xor_slot_count(size_t len, int src_cnt) 242 { 243 return 1; 244 } 245 246 /** 247 * mv_xor_free_slots - flags descriptor slots for reuse 248 * @slot: Slot to free 249 * Caller must hold &mv_chan->lock while calling this function 250 */ 251 static void mv_xor_free_slots(struct mv_xor_chan *mv_chan, 252 struct mv_xor_desc_slot *slot) 253 { 254 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n", 255 __func__, __LINE__, slot); 256 257 slot->slots_per_op = 0; 258 259 } 260 261 /* 262 * mv_xor_start_new_chain - program the engine to operate on new chain headed by 263 * sw_desc 264 * Caller must hold &mv_chan->lock while calling this function 265 */ 266 static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan, 267 struct mv_xor_desc_slot *sw_desc) 268 { 269 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n", 270 __func__, __LINE__, sw_desc); 271 if (sw_desc->type != mv_chan->current_type) 272 mv_set_mode(mv_chan, sw_desc->type); 273 274 if (sw_desc->type == DMA_MEMSET) { 275 /* for memset requests we need to program the engine, no 276 * descriptors used. 
277 */ 278 struct mv_xor_desc *hw_desc = sw_desc->hw_desc; 279 mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr); 280 mv_chan_set_block_size(mv_chan, sw_desc->unmap_len); 281 mv_chan_set_value(mv_chan, sw_desc->value); 282 } else { 283 /* set the hardware chain */ 284 mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys); 285 } 286 mv_chan->pending += sw_desc->slot_cnt; 287 mv_xor_issue_pending(&mv_chan->dmachan); 288 } 289 290 static dma_cookie_t 291 mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc, 292 struct mv_xor_chan *mv_chan, dma_cookie_t cookie) 293 { 294 BUG_ON(desc->async_tx.cookie < 0); 295 296 if (desc->async_tx.cookie > 0) { 297 cookie = desc->async_tx.cookie; 298 299 /* call the callback (must not sleep or submit new 300 * operations to this channel) 301 */ 302 if (desc->async_tx.callback) 303 desc->async_tx.callback( 304 desc->async_tx.callback_param); 305 306 /* unmap dma addresses 307 * (unmap_single vs unmap_page?) 308 */ 309 if (desc->group_head && desc->unmap_len) { 310 struct mv_xor_desc_slot *unmap = desc->group_head; 311 struct device *dev = mv_chan_to_devp(mv_chan); 312 u32 len = unmap->unmap_len; 313 enum dma_ctrl_flags flags = desc->async_tx.flags; 314 u32 src_cnt; 315 dma_addr_t addr; 316 dma_addr_t dest; 317 318 src_cnt = unmap->unmap_src_cnt; 319 dest = mv_desc_get_dest_addr(unmap); 320 if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 321 enum dma_data_direction dir; 322 323 if (src_cnt > 1) /* is xor ? 
*/ 324 dir = DMA_BIDIRECTIONAL; 325 else 326 dir = DMA_FROM_DEVICE; 327 dma_unmap_page(dev, dest, len, dir); 328 } 329 330 if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 331 while (src_cnt--) { 332 addr = mv_desc_get_src_addr(unmap, 333 src_cnt); 334 if (addr == dest) 335 continue; 336 dma_unmap_page(dev, addr, len, 337 DMA_TO_DEVICE); 338 } 339 } 340 desc->group_head = NULL; 341 } 342 } 343 344 /* run dependent operations */ 345 dma_run_dependencies(&desc->async_tx); 346 347 return cookie; 348 } 349 350 static int 351 mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan) 352 { 353 struct mv_xor_desc_slot *iter, *_iter; 354 355 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 356 list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots, 357 completed_node) { 358 359 if (async_tx_test_ack(&iter->async_tx)) { 360 list_del(&iter->completed_node); 361 mv_xor_free_slots(mv_chan, iter); 362 } 363 } 364 return 0; 365 } 366 367 static int 368 mv_xor_clean_slot(struct mv_xor_desc_slot *desc, 369 struct mv_xor_chan *mv_chan) 370 { 371 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n", 372 __func__, __LINE__, desc, desc->async_tx.flags); 373 list_del(&desc->chain_node); 374 /* the client is allowed to attach dependent operations 375 * until 'ack' is set 376 */ 377 if (!async_tx_test_ack(&desc->async_tx)) { 378 /* move this slot to the completed_slots */ 379 list_add_tail(&desc->completed_node, &mv_chan->completed_slots); 380 return 0; 381 } 382 383 mv_xor_free_slots(mv_chan, desc); 384 return 0; 385 } 386 387 static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) 388 { 389 struct mv_xor_desc_slot *iter, *_iter; 390 dma_cookie_t cookie = 0; 391 int busy = mv_chan_is_busy(mv_chan); 392 u32 current_desc = mv_chan_get_current_desc(mv_chan); 393 int seen_current = 0; 394 395 dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__); 396 dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc); 397 
mv_xor_clean_completed_slots(mv_chan); 398 399 /* free completed slots from the chain starting with 400 * the oldest descriptor 401 */ 402 403 list_for_each_entry_safe(iter, _iter, &mv_chan->chain, 404 chain_node) { 405 prefetch(_iter); 406 prefetch(&_iter->async_tx); 407 408 /* do not advance past the current descriptor loaded into the 409 * hardware channel, subsequent descriptors are either in 410 * process or have not been submitted 411 */ 412 if (seen_current) 413 break; 414 415 /* stop the search if we reach the current descriptor and the 416 * channel is busy 417 */ 418 if (iter->async_tx.phys == current_desc) { 419 seen_current = 1; 420 if (busy) 421 break; 422 } 423 424 cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie); 425 426 if (mv_xor_clean_slot(iter, mv_chan)) 427 break; 428 } 429 430 if ((busy == 0) && !list_empty(&mv_chan->chain)) { 431 struct mv_xor_desc_slot *chain_head; 432 chain_head = list_entry(mv_chan->chain.next, 433 struct mv_xor_desc_slot, 434 chain_node); 435 436 mv_xor_start_new_chain(mv_chan, chain_head); 437 } 438 439 if (cookie > 0) 440 mv_chan->dmachan.completed_cookie = cookie; 441 } 442 443 static void 444 mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan) 445 { 446 spin_lock_bh(&mv_chan->lock); 447 __mv_xor_slot_cleanup(mv_chan); 448 spin_unlock_bh(&mv_chan->lock); 449 } 450 451 static void mv_xor_tasklet(unsigned long data) 452 { 453 struct mv_xor_chan *chan = (struct mv_xor_chan *) data; 454 mv_xor_slot_cleanup(chan); 455 } 456 457 static struct mv_xor_desc_slot * 458 mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots, 459 int slots_per_op) 460 { 461 struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL; 462 LIST_HEAD(chain); 463 int slots_found, retry = 0; 464 465 /* start search from the last allocated descrtiptor 466 * if a contiguous allocation can not be found start searching 467 * from the beginning of the list 468 */ 469 retry: 470 slots_found = 0; 471 if (retry == 0) 472 iter = 
mv_chan->last_used; 473 else 474 iter = list_entry(&mv_chan->all_slots, 475 struct mv_xor_desc_slot, 476 slot_node); 477 478 list_for_each_entry_safe_continue( 479 iter, _iter, &mv_chan->all_slots, slot_node) { 480 prefetch(_iter); 481 prefetch(&_iter->async_tx); 482 if (iter->slots_per_op) { 483 /* give up after finding the first busy slot 484 * on the second pass through the list 485 */ 486 if (retry) 487 break; 488 489 slots_found = 0; 490 continue; 491 } 492 493 /* start the allocation if the slot is correctly aligned */ 494 if (!slots_found++) 495 alloc_start = iter; 496 497 if (slots_found == num_slots) { 498 struct mv_xor_desc_slot *alloc_tail = NULL; 499 struct mv_xor_desc_slot *last_used = NULL; 500 iter = alloc_start; 501 while (num_slots) { 502 int i; 503 504 /* pre-ack all but the last descriptor */ 505 async_tx_ack(&iter->async_tx); 506 507 list_add_tail(&iter->chain_node, &chain); 508 alloc_tail = iter; 509 iter->async_tx.cookie = 0; 510 iter->slot_cnt = num_slots; 511 iter->xor_check_result = NULL; 512 for (i = 0; i < slots_per_op; i++) { 513 iter->slots_per_op = slots_per_op - i; 514 last_used = iter; 515 iter = list_entry(iter->slot_node.next, 516 struct mv_xor_desc_slot, 517 slot_node); 518 } 519 num_slots -= slots_per_op; 520 } 521 alloc_tail->group_head = alloc_start; 522 alloc_tail->async_tx.cookie = -EBUSY; 523 list_splice(&chain, &alloc_tail->tx_list); 524 mv_chan->last_used = last_used; 525 mv_desc_clear_next_desc(alloc_start); 526 mv_desc_clear_next_desc(alloc_tail); 527 return alloc_tail; 528 } 529 } 530 if (!retry++) 531 goto retry; 532 533 /* try to free some slots if the allocation fails */ 534 tasklet_schedule(&mv_chan->irq_tasklet); 535 536 return NULL; 537 } 538 539 /************************ DMA engine API functions ****************************/ 540 static dma_cookie_t 541 mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) 542 { 543 struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx); 544 struct mv_xor_chan *mv_chan = 
to_mv_xor_chan(tx->chan); 545 struct mv_xor_desc_slot *grp_start, *old_chain_tail; 546 dma_cookie_t cookie; 547 int new_hw_chain = 1; 548 549 dev_dbg(mv_chan_to_devp(mv_chan), 550 "%s sw_desc %p: async_tx %p\n", 551 __func__, sw_desc, &sw_desc->async_tx); 552 553 grp_start = sw_desc->group_head; 554 555 spin_lock_bh(&mv_chan->lock); 556 cookie = dma_cookie_assign(tx); 557 558 if (list_empty(&mv_chan->chain)) 559 list_splice_init(&sw_desc->tx_list, &mv_chan->chain); 560 else { 561 new_hw_chain = 0; 562 563 old_chain_tail = list_entry(mv_chan->chain.prev, 564 struct mv_xor_desc_slot, 565 chain_node); 566 list_splice_init(&grp_start->tx_list, 567 &old_chain_tail->chain_node); 568 569 if (!mv_can_chain(grp_start)) 570 goto submit_done; 571 572 dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n", 573 old_chain_tail->async_tx.phys); 574 575 /* fix up the hardware chain */ 576 mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys); 577 578 /* if the channel is not busy */ 579 if (!mv_chan_is_busy(mv_chan)) { 580 u32 current_desc = mv_chan_get_current_desc(mv_chan); 581 /* 582 * and the curren desc is the end of the chain before 583 * the append, then we need to start the channel 584 */ 585 if (current_desc == old_chain_tail->async_tx.phys) 586 new_hw_chain = 1; 587 } 588 } 589 590 if (new_hw_chain) 591 mv_xor_start_new_chain(mv_chan, grp_start); 592 593 submit_done: 594 spin_unlock_bh(&mv_chan->lock); 595 596 return cookie; 597 } 598 599 /* returns the number of allocated descriptors */ 600 static int mv_xor_alloc_chan_resources(struct dma_chan *chan) 601 { 602 char *hw_desc; 603 int idx; 604 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 605 struct mv_xor_desc_slot *slot = NULL; 606 int num_descs_in_pool = mv_chan->device->pool_size/MV_XOR_SLOT_SIZE; 607 608 /* Allocate descriptor slots */ 609 idx = mv_chan->slots_allocated; 610 while (idx < num_descs_in_pool) { 611 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 612 if (!slot) { 613 printk(KERN_INFO 
"MV XOR Channel only initialized" 614 " %d descriptor slots", idx); 615 break; 616 } 617 hw_desc = (char *) mv_chan->device->dma_desc_pool_virt; 618 slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 619 620 dma_async_tx_descriptor_init(&slot->async_tx, chan); 621 slot->async_tx.tx_submit = mv_xor_tx_submit; 622 INIT_LIST_HEAD(&slot->chain_node); 623 INIT_LIST_HEAD(&slot->slot_node); 624 INIT_LIST_HEAD(&slot->tx_list); 625 hw_desc = (char *) mv_chan->device->dma_desc_pool; 626 slot->async_tx.phys = 627 (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; 628 slot->idx = idx++; 629 630 spin_lock_bh(&mv_chan->lock); 631 mv_chan->slots_allocated = idx; 632 list_add_tail(&slot->slot_node, &mv_chan->all_slots); 633 spin_unlock_bh(&mv_chan->lock); 634 } 635 636 if (mv_chan->slots_allocated && !mv_chan->last_used) 637 mv_chan->last_used = list_entry(mv_chan->all_slots.next, 638 struct mv_xor_desc_slot, 639 slot_node); 640 641 dev_dbg(mv_chan_to_devp(mv_chan), 642 "allocated %d descriptor slots last_used: %p\n", 643 mv_chan->slots_allocated, mv_chan->last_used); 644 645 return mv_chan->slots_allocated ? 
: -ENOMEM; 646 } 647 648 static struct dma_async_tx_descriptor * 649 mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 650 size_t len, unsigned long flags) 651 { 652 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 653 struct mv_xor_desc_slot *sw_desc, *grp_start; 654 int slot_cnt; 655 656 dev_dbg(mv_chan_to_devp(mv_chan), 657 "%s dest: %x src %x len: %u flags: %ld\n", 658 __func__, dest, src, len, flags); 659 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 660 return NULL; 661 662 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 663 664 spin_lock_bh(&mv_chan->lock); 665 slot_cnt = mv_chan_memcpy_slot_count(len); 666 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); 667 if (sw_desc) { 668 sw_desc->type = DMA_MEMCPY; 669 sw_desc->async_tx.flags = flags; 670 grp_start = sw_desc->group_head; 671 mv_desc_init(grp_start, flags); 672 mv_desc_set_byte_count(grp_start, len); 673 mv_desc_set_dest_addr(sw_desc->group_head, dest); 674 mv_desc_set_src_addr(grp_start, 0, src); 675 sw_desc->unmap_src_cnt = 1; 676 sw_desc->unmap_len = len; 677 } 678 spin_unlock_bh(&mv_chan->lock); 679 680 dev_dbg(mv_chan_to_devp(mv_chan), 681 "%s sw_desc %p async_tx %p\n", 682 __func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0); 683 684 return sw_desc ? 
&sw_desc->async_tx : NULL; 685 } 686 687 static struct dma_async_tx_descriptor * 688 mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, 689 size_t len, unsigned long flags) 690 { 691 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 692 struct mv_xor_desc_slot *sw_desc, *grp_start; 693 int slot_cnt; 694 695 dev_dbg(mv_chan_to_devp(mv_chan), 696 "%s dest: %x len: %u flags: %ld\n", 697 __func__, dest, len, flags); 698 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 699 return NULL; 700 701 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 702 703 spin_lock_bh(&mv_chan->lock); 704 slot_cnt = mv_chan_memset_slot_count(len); 705 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); 706 if (sw_desc) { 707 sw_desc->type = DMA_MEMSET; 708 sw_desc->async_tx.flags = flags; 709 grp_start = sw_desc->group_head; 710 mv_desc_init(grp_start, flags); 711 mv_desc_set_byte_count(grp_start, len); 712 mv_desc_set_dest_addr(sw_desc->group_head, dest); 713 mv_desc_set_block_fill_val(grp_start, value); 714 sw_desc->unmap_src_cnt = 1; 715 sw_desc->unmap_len = len; 716 } 717 spin_unlock_bh(&mv_chan->lock); 718 dev_dbg(mv_chan_to_devp(mv_chan), 719 "%s sw_desc %p async_tx %p \n", 720 __func__, sw_desc, &sw_desc->async_tx); 721 return sw_desc ? 
&sw_desc->async_tx : NULL; 722 } 723 724 static struct dma_async_tx_descriptor * 725 mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, 726 unsigned int src_cnt, size_t len, unsigned long flags) 727 { 728 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 729 struct mv_xor_desc_slot *sw_desc, *grp_start; 730 int slot_cnt; 731 732 if (unlikely(len < MV_XOR_MIN_BYTE_COUNT)) 733 return NULL; 734 735 BUG_ON(len > MV_XOR_MAX_BYTE_COUNT); 736 737 dev_dbg(mv_chan_to_devp(mv_chan), 738 "%s src_cnt: %d len: dest %x %u flags: %ld\n", 739 __func__, src_cnt, len, dest, flags); 740 741 spin_lock_bh(&mv_chan->lock); 742 slot_cnt = mv_chan_xor_slot_count(len, src_cnt); 743 sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1); 744 if (sw_desc) { 745 sw_desc->type = DMA_XOR; 746 sw_desc->async_tx.flags = flags; 747 grp_start = sw_desc->group_head; 748 mv_desc_init(grp_start, flags); 749 /* the byte count field is the same as in memcpy desc*/ 750 mv_desc_set_byte_count(grp_start, len); 751 mv_desc_set_dest_addr(sw_desc->group_head, dest); 752 sw_desc->unmap_src_cnt = src_cnt; 753 sw_desc->unmap_len = len; 754 while (src_cnt--) 755 mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]); 756 } 757 spin_unlock_bh(&mv_chan->lock); 758 dev_dbg(mv_chan_to_devp(mv_chan), 759 "%s sw_desc %p async_tx %p \n", 760 __func__, sw_desc, &sw_desc->async_tx); 761 return sw_desc ? 
&sw_desc->async_tx : NULL;
}

/*
 * mv_xor_free_chan_resources - release every software descriptor slot owned
 * by the channel.
 *
 * Any descriptor still sitting on the hardware chain or the completed list
 * is counted as "in use" and reported, since freeing it here means the
 * client never acked it.
 */
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	/* reap whatever the hardware has already finished */
	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
					chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	/* walk all_slots in reverse so list bookkeeping stays consistent
	 * while nodes are deleted and freed
	 */
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		/* already complete: just garbage-collect acked slots */
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	/* not complete yet: run cleanup under the lock, then re-check */
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

/* dump the channel's registers - debugging aid used on error interrupts */
static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan),
		"config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan),
		"activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan),
		"intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan),
		"error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan),
		"error addr 0x%08x.\n", val);
}

/*
 * Handle an error interrupt: cause bit 4 is treated as benign and ignored;
 * any other error bit dumps the channel registers and BUGs.
 */
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x.\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

/*
 * Top-half interrupt handler: dispatch error causes, then defer descriptor
 * cleanup to the tasklet and ack the end-of-chain cause bit.
 */
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	/* descriptor cleanup runs in tasklet context, not here */
	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

/*
 * Batch hardware kicks: the channel is only activated once at least
 * MV_XOR_THRESHOLD descriptors are pending.
 */
static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
898 */ 899 #define MV_XOR_TEST_SIZE 2000 900 901 static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device) 902 { 903 int i; 904 void *src, *dest; 905 dma_addr_t src_dma, dest_dma; 906 struct dma_chan *dma_chan; 907 dma_cookie_t cookie; 908 struct dma_async_tx_descriptor *tx; 909 int err = 0; 910 911 src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 912 if (!src) 913 return -ENOMEM; 914 915 dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL); 916 if (!dest) { 917 kfree(src); 918 return -ENOMEM; 919 } 920 921 /* Fill in src buffer */ 922 for (i = 0; i < MV_XOR_TEST_SIZE; i++) 923 ((u8 *) src)[i] = (u8)i; 924 925 /* Start copy, using first DMA channel */ 926 dma_chan = container_of(device->common.channels.next, 927 struct dma_chan, 928 device_node); 929 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 930 err = -ENODEV; 931 goto out; 932 } 933 934 dest_dma = dma_map_single(dma_chan->device->dev, dest, 935 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 936 937 src_dma = dma_map_single(dma_chan->device->dev, src, 938 MV_XOR_TEST_SIZE, DMA_TO_DEVICE); 939 940 tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma, 941 MV_XOR_TEST_SIZE, 0); 942 cookie = mv_xor_tx_submit(tx); 943 mv_xor_issue_pending(dma_chan); 944 async_tx_ack(tx); 945 msleep(1); 946 947 if (mv_xor_status(dma_chan, cookie, NULL) != 948 DMA_SUCCESS) { 949 dev_err(dma_chan->device->dev, 950 "Self-test copy timed out, disabling\n"); 951 err = -ENODEV; 952 goto free_resources; 953 } 954 955 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 956 MV_XOR_TEST_SIZE, DMA_FROM_DEVICE); 957 if (memcmp(src, dest, MV_XOR_TEST_SIZE)) { 958 dev_err(dma_chan->device->dev, 959 "Self-test copy failed compare, disabling\n"); 960 err = -ENODEV; 961 goto free_resources; 962 } 963 964 free_resources: 965 mv_xor_free_chan_resources(dma_chan); 966 out: 967 kfree(src); 968 kfree(dest); 969 return err; 970 } 971 972 #define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */ 973 static int __devinit 974 
mv_xor_xor_self_test(struct mv_xor_device *device) 975 { 976 int i, src_idx; 977 struct page *dest; 978 struct page *xor_srcs[MV_XOR_NUM_SRC_TEST]; 979 dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST]; 980 dma_addr_t dest_dma; 981 struct dma_async_tx_descriptor *tx; 982 struct dma_chan *dma_chan; 983 dma_cookie_t cookie; 984 u8 cmp_byte = 0; 985 u32 cmp_word; 986 int err = 0; 987 988 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 989 xor_srcs[src_idx] = alloc_page(GFP_KERNEL); 990 if (!xor_srcs[src_idx]) { 991 while (src_idx--) 992 __free_page(xor_srcs[src_idx]); 993 return -ENOMEM; 994 } 995 } 996 997 dest = alloc_page(GFP_KERNEL); 998 if (!dest) { 999 while (src_idx--) 1000 __free_page(xor_srcs[src_idx]); 1001 return -ENOMEM; 1002 } 1003 1004 /* Fill in src buffers */ 1005 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) { 1006 u8 *ptr = page_address(xor_srcs[src_idx]); 1007 for (i = 0; i < PAGE_SIZE; i++) 1008 ptr[i] = (1 << src_idx); 1009 } 1010 1011 for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) 1012 cmp_byte ^= (u8) (1 << src_idx); 1013 1014 cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | 1015 (cmp_byte << 8) | cmp_byte; 1016 1017 memset(page_address(dest), 0, PAGE_SIZE); 1018 1019 dma_chan = container_of(device->common.channels.next, 1020 struct dma_chan, 1021 device_node); 1022 if (mv_xor_alloc_chan_resources(dma_chan) < 1) { 1023 err = -ENODEV; 1024 goto out; 1025 } 1026 1027 /* test xor */ 1028 dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE, 1029 DMA_FROM_DEVICE); 1030 1031 for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++) 1032 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i], 1033 0, PAGE_SIZE, DMA_TO_DEVICE); 1034 1035 tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs, 1036 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0); 1037 1038 cookie = mv_xor_tx_submit(tx); 1039 mv_xor_issue_pending(dma_chan); 1040 async_tx_ack(tx); 1041 msleep(8); 1042 1043 if (mv_xor_status(dma_chan, cookie, NULL) != 1044 
DMA_SUCCESS) { 1045 dev_err(dma_chan->device->dev, 1046 "Self-test xor timed out, disabling\n"); 1047 err = -ENODEV; 1048 goto free_resources; 1049 } 1050 1051 dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma, 1052 PAGE_SIZE, DMA_FROM_DEVICE); 1053 for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { 1054 u32 *ptr = page_address(dest); 1055 if (ptr[i] != cmp_word) { 1056 dev_err(dma_chan->device->dev, 1057 "Self-test xor failed compare, disabling." 1058 " index %d, data %x, expected %x\n", i, 1059 ptr[i], cmp_word); 1060 err = -ENODEV; 1061 goto free_resources; 1062 } 1063 } 1064 1065 free_resources: 1066 mv_xor_free_chan_resources(dma_chan); 1067 out: 1068 src_idx = MV_XOR_NUM_SRC_TEST; 1069 while (src_idx--) 1070 __free_page(xor_srcs[src_idx]); 1071 __free_page(dest); 1072 return err; 1073 } 1074 1075 static int mv_xor_channel_remove(struct mv_xor_device *device) 1076 { 1077 struct dma_chan *chan, *_chan; 1078 struct mv_xor_chan *mv_chan; 1079 struct device *dev = device->common.dev; 1080 1081 dma_async_device_unregister(&device->common); 1082 1083 dma_free_coherent(dev, device->pool_size, 1084 device->dma_desc_pool_virt, device->dma_desc_pool); 1085 1086 list_for_each_entry_safe(chan, _chan, &device->common.channels, 1087 device_node) { 1088 mv_chan = to_mv_xor_chan(chan); 1089 list_del(&chan->device_node); 1090 } 1091 1092 return 0; 1093 } 1094 1095 static struct mv_xor_device * 1096 mv_xor_channel_add(struct mv_xor_private *msp, 1097 struct platform_device *pdev, 1098 int hw_id, dma_cap_mask_t cap_mask, 1099 size_t pool_size, int irq) 1100 { 1101 int ret = 0; 1102 struct mv_xor_device *adev; 1103 struct mv_xor_chan *mv_chan; 1104 struct dma_device *dma_dev; 1105 1106 adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL); 1107 if (!adev) 1108 return ERR_PTR(-ENOMEM); 1109 1110 dma_dev = &adev->common; 1111 1112 /* allocate coherent memory for hardware descriptors 1113 * note: writecombine gives slightly better performance, but 1114 * requires that we 
explicitly flush the writes 1115 */ 1116 adev->pool_size = pool_size; 1117 adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev, 1118 adev->pool_size, 1119 &adev->dma_desc_pool, 1120 GFP_KERNEL); 1121 if (!adev->dma_desc_pool_virt) 1122 return ERR_PTR(-ENOMEM); 1123 1124 /* discover transaction capabilites from the platform data */ 1125 dma_dev->cap_mask = cap_mask; 1126 adev->shared = msp; 1127 1128 INIT_LIST_HEAD(&dma_dev->channels); 1129 1130 /* set base routines */ 1131 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; 1132 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1133 dma_dev->device_tx_status = mv_xor_status; 1134 dma_dev->device_issue_pending = mv_xor_issue_pending; 1135 dma_dev->dev = &pdev->dev; 1136 1137 /* set prep routines based on capability */ 1138 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) 1139 dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy; 1140 if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) 1141 dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset; 1142 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1143 dma_dev->max_xor = 8; 1144 dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor; 1145 } 1146 1147 mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL); 1148 if (!mv_chan) { 1149 ret = -ENOMEM; 1150 goto err_free_dma; 1151 } 1152 mv_chan->device = adev; 1153 mv_chan->idx = hw_id; 1154 mv_chan->mmr_base = adev->shared->xor_base; 1155 1156 if (!mv_chan->mmr_base) { 1157 ret = -ENOMEM; 1158 goto err_free_dma; 1159 } 1160 tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long) 1161 mv_chan); 1162 1163 /* clear errors before enabling interrupts */ 1164 mv_xor_device_clear_err_status(mv_chan); 1165 1166 ret = devm_request_irq(&pdev->dev, irq, 1167 mv_xor_interrupt_handler, 1168 0, dev_name(&pdev->dev), mv_chan); 1169 if (ret) 1170 goto err_free_dma; 1171 1172 mv_chan_unmask_interrupts(mv_chan); 1173 1174 mv_set_mode(mv_chan, DMA_MEMCPY); 1175 1176 
spin_lock_init(&mv_chan->lock); 1177 INIT_LIST_HEAD(&mv_chan->chain); 1178 INIT_LIST_HEAD(&mv_chan->completed_slots); 1179 INIT_LIST_HEAD(&mv_chan->all_slots); 1180 mv_chan->dmachan.device = dma_dev; 1181 dma_cookie_init(&mv_chan->dmachan); 1182 1183 list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels); 1184 1185 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { 1186 ret = mv_xor_memcpy_self_test(adev); 1187 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret); 1188 if (ret) 1189 goto err_free_dma; 1190 } 1191 1192 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1193 ret = mv_xor_xor_self_test(adev); 1194 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); 1195 if (ret) 1196 goto err_free_dma; 1197 } 1198 1199 dev_info(&pdev->dev, "Marvell XOR: " 1200 "( %s%s%s%s)\n", 1201 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", 1202 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", 1203 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", 1204 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? 
"intr " : ""); 1205 1206 dma_async_device_register(dma_dev); 1207 return adev; 1208 1209 err_free_dma: 1210 dma_free_coherent(&pdev->dev, pool_size, 1211 adev->dma_desc_pool_virt, adev->dma_desc_pool); 1212 return ERR_PTR(ret); 1213 } 1214 1215 static void 1216 mv_xor_conf_mbus_windows(struct mv_xor_private *msp, 1217 const struct mbus_dram_target_info *dram) 1218 { 1219 void __iomem *base = msp->xor_base; 1220 u32 win_enable = 0; 1221 int i; 1222 1223 for (i = 0; i < 8; i++) { 1224 writel(0, base + WINDOW_BASE(i)); 1225 writel(0, base + WINDOW_SIZE(i)); 1226 if (i < 4) 1227 writel(0, base + WINDOW_REMAP_HIGH(i)); 1228 } 1229 1230 for (i = 0; i < dram->num_cs; i++) { 1231 const struct mbus_dram_window *cs = dram->cs + i; 1232 1233 writel((cs->base & 0xffff0000) | 1234 (cs->mbus_attr << 8) | 1235 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 1236 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 1237 1238 win_enable |= (1 << i); 1239 win_enable |= 3 << (16 + (2 * i)); 1240 } 1241 1242 writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1243 writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1244 } 1245 1246 static int mv_xor_probe(struct platform_device *pdev) 1247 { 1248 const struct mbus_dram_target_info *dram; 1249 struct mv_xor_private *msp; 1250 struct mv_xor_platform_data *pdata = pdev->dev.platform_data; 1251 struct resource *res; 1252 int i, ret; 1253 1254 dev_notice(&pdev->dev, "Marvell XOR driver\n"); 1255 1256 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); 1257 if (!msp) 1258 return -ENOMEM; 1259 1260 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1261 if (!res) 1262 return -ENODEV; 1263 1264 msp->xor_base = devm_ioremap(&pdev->dev, res->start, 1265 resource_size(res)); 1266 if (!msp->xor_base) 1267 return -EBUSY; 1268 1269 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1270 if (!res) 1271 return -ENODEV; 1272 1273 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, 1274 resource_size(res)); 1275 if 
(!msp->xor_high_base) 1276 return -EBUSY; 1277 1278 platform_set_drvdata(pdev, msp); 1279 1280 /* 1281 * (Re-)program MBUS remapping windows if we are asked to. 1282 */ 1283 dram = mv_mbus_dram_info(); 1284 if (dram) 1285 mv_xor_conf_mbus_windows(msp, dram); 1286 1287 /* Not all platforms can gate the clock, so it is not 1288 * an error if the clock does not exists. 1289 */ 1290 msp->clk = clk_get(&pdev->dev, NULL); 1291 if (!IS_ERR(msp->clk)) 1292 clk_prepare_enable(msp->clk); 1293 1294 if (pdata && pdata->channels) { 1295 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1296 struct mv_xor_channel_data *cd; 1297 int irq; 1298 1299 cd = &pdata->channels[i]; 1300 if (!cd) { 1301 ret = -ENODEV; 1302 goto err_channel_add; 1303 } 1304 1305 irq = platform_get_irq(pdev, i); 1306 if (irq < 0) { 1307 ret = irq; 1308 goto err_channel_add; 1309 } 1310 1311 msp->channels[i] = 1312 mv_xor_channel_add(msp, pdev, cd->hw_id, 1313 cd->cap_mask, 1314 cd->pool_size, irq); 1315 if (IS_ERR(msp->channels[i])) { 1316 ret = PTR_ERR(msp->channels[i]); 1317 goto err_channel_add; 1318 } 1319 } 1320 } 1321 1322 return 0; 1323 1324 err_channel_add: 1325 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) 1326 if (msp->channels[i]) 1327 mv_xor_channel_remove(msp->channels[i]); 1328 1329 clk_disable_unprepare(msp->clk); 1330 clk_put(msp->clk); 1331 return ret; 1332 } 1333 1334 static int mv_xor_remove(struct platform_device *pdev) 1335 { 1336 struct mv_xor_private *msp = platform_get_drvdata(pdev); 1337 int i; 1338 1339 for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) { 1340 if (msp->channels[i]) 1341 mv_xor_channel_remove(msp->channels[i]); 1342 } 1343 1344 if (!IS_ERR(msp->clk)) { 1345 clk_disable_unprepare(msp->clk); 1346 clk_put(msp->clk); 1347 } 1348 1349 return 0; 1350 } 1351 1352 static struct platform_driver mv_xor_driver = { 1353 .probe = mv_xor_probe, 1354 .remove = mv_xor_remove, 1355 .driver = { 1356 .owner = THIS_MODULE, 1357 .name = MV_XOR_NAME, 1358 }, 1359 }; 1360 1361 1362 static int __init 
mv_xor_init(void) 1363 { 1364 return platform_driver_register(&mv_xor_driver); 1365 } 1366 module_init(mv_xor_init); 1367 1368 /* it's currently unsafe to unload this module */ 1369 #if 0 1370 static void __exit mv_xor_exit(void) 1371 { 1372 platform_driver_unregister(&mv_xor_driver); 1373 return; 1374 } 1375 1376 module_exit(mv_xor_exit); 1377 #endif 1378 1379 MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>"); 1380 MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine"); 1381 MODULE_LICENSE("GPL"); 1382