/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}
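/*
 * Note on the hardware descriptor fields touched above (summarized from the
 * accessors themselves; the exact bit meanings live in the datasheet and in
 * mv_xor.h): mv_desc_init() sets bit 31 of both the status and the command
 * words, which appears to mark the descriptor as owned/enabled for the
 * engine, and mv_desc_set_src_addr() additionally sets one low command bit
 * per XOR source to enable that source. The memset fill value is kept in
 * the software slot (desc->value) rather than in the hardware descriptor,
 * because memset operations are programmed directly into the channel
 * registers (see mv_xor_start_new_chain()).
 */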
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}
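/*
 * The interrupt helpers above all shift by (chan->idx * 16): each XOR
 * channel of a unit appears to own a 16-bit slice of the shared cause and
 * mask registers. Within a slice, bits 4..9 are treated as error
 * conditions by mv_is_err_intr(). The cause register seems to be
 * write-zero-to-clear: mv_xor_device_clear_eoc_cause() clears only bit 0
 * of the channel's slice, while mv_xor_device_clear_err_status() clears
 * the whole slice. This is inferred from the masks used here; the SoC
 * datasheet remains the authoritative reference.
 */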
static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_err(chan->device->common.dev,
			"error: unsupported operation %d.\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;

}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed
 * by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}
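/*
 * Completion handling below also takes care of unmapping the DMA buffers.
 * The direction choices mirror what the async_tx core is assumed to have
 * used when mapping: for multi-source (XOR) operations the destination is
 * unmapped DMA_BIDIRECTIONAL, since the destination page may also appear
 * in the source list, while a plain copy destination is DMA_FROM_DEVICE
 * and sources are DMA_TO_DEVICE. A source equal to the destination is
 * skipped so the same page is not unmapped twice.
 */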
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->common.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}
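/*
 * Descriptor slot lifecycle, as implemented by the functions around here:
 * prep_dma_* grabs slots from mv_chan->all_slots via mv_xor_alloc_slots(),
 * tx_submit() links them onto mv_chan->chain and starts the hardware when
 * needed, the completion tasklet walks the chain running callbacks and
 * unmaps, and slots whose transactions have not yet been acked are parked
 * on mv_chan->completed_slots until async_tx_test_ack() succeeds, at which
 * point mv_xor_free_slots() marks them reusable (slots_per_op = 0).
 */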
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
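/*
 * Illustrative sketch (not compiled): roughly how a dmaengine client would
 * reach the callbacks below. The helper name and buffer parameters are made
 * up for illustration; the self tests further down are the authoritative
 * in-driver example.
 */
#if 0
static int mv_xor_usage_sketch(dma_addr_t dest, dma_addr_t src, size_t len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* ends up in mv_xor_prep_dma_memcpy() */
	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, 0);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);	/* mv_xor_tx_submit() */
	dma_async_issue_pending(chan);	/* mv_xor_issue_pending() */

	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) !=
	       DMA_SUCCESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}
#endif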
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = mv_chan->device->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc*/
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_err(chan->device->common.dev,
		"config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_err(chan->device->common.dev,
		"activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_err(chan->device->common.dev,
		"intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_err(chan->device->common.dev,
		"intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_err(chan->device->common.dev,
		"error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_err(chan->device->common.dev,
		"error addr 0x%08x.\n", val);
}
static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_err(chan->device->common.dev,
		"error on chan %d. intr cause 0x%08x.\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling."
				" index %d, data %x, expected %x\n", i,
				ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, device->pool_size,
			device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;


	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->pool_size = plat_data->pool_size;
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  adev->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->common.device = dma_dev;
	dma_cookie_init(&mv_chan->common);

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_info(&pdev->dev, "Marvell XOR: "
		"( %s%s%s%s)\n",
		dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			adev->dma_desc_pool_virt, adev->dma_desc_pool);
out:
	return ret;
}
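/*
 * Program the XOR unit's address decoding windows from the mbus DRAM
 * layout so the engine can reach system memory. The register usage is
 * taken from the code itself: all eight windows (and the four remap
 * registers) are cleared first, then one window is set up per DRAM chip
 * select; in win_enable, bit i presumably enables window i and the pair of
 * bits written at 16 + 2*i appears to grant read/write access for it.
 */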
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= __devexit_p(mv_xor_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};

static int mv_xor_shared_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(msp, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	msp->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(msp->clk))
		clk_prepare_enable(msp->clk);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);

	if (!IS_ERR(msp->clk)) {
		clk_disable_unprepare(msp->clk);
		clk_put(msp->clk);
	}

	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};


static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");