/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/async_tx.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <asm/plat-orion/mv_xor.h>
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}


static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = (1 << (1 + (chan->idx * 16)));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	u32 activation;

	dev_dbg(chan->device->common.dev, " activate chan.\n");
	activation = __raw_readl(XOR_ACTIVATION(chan));
	activation |= 0x1;
	__raw_writel(activation, XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}
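
/*
 * Note on the register layout as the helpers above use it (a description
 * of this file's conventions, not additional behaviour): the channels of
 * an engine share XOR_INTR_CAUSE/XOR_INTR_MASK, each channel owning a
 * 16-bit slice selected by (chan->idx * 16).  Within a slice, bit 1 is the
 * end-of-chain/completion event cleared by mv_xor_device_clear_eoc_cause(),
 * and bits 4..9 are the error conditions tested by mv_is_err_intr().  The
 * activation register reports the channel state in bits [5:4], and a value
 * of 1 there is what mv_chan_is_busy() treats as "busy".
 */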

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @mv_chan: XOR channel that owns the slot
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan,
			       dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;

			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				addr = mv_desc_get_dest_addr(unmap);
				dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				src_cnt = unmap->unmap_src_cnt;
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	async_tx_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	__mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->async_tx.tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}
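
/*
 * A short note on cookie handling (describing the code above and below,
 * not adding behaviour): mv_desc_assign_cookie() hands out monotonically
 * increasing cookies, wrapping back to 1 on signed overflow, and records
 * the newest value in mv_chan->common.cookie.  The cleanup path stores the
 * cookie of the last retired descriptor in mv_chan->completed_cookie, so
 * dma_async_is_complete(cookie, completed_cookie, common.cookie) in
 * mv_xor_is_complete() can decide whether a given transaction has finished.
 */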

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->async_tx.tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan,
				       struct dma_client *client)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots\n", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->async_tx.tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}
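
/*
 * Layout note (a description of the allocation above, with an illustrative
 * expression rather than new behaviour): each software slot simply indexes
 * into the coherent descriptor pool allocated at probe time, so for slot i
 *
 *	slot->hw_desc       = dma_desc_pool_virt + i * MV_XOR_SLOT_SIZE
 *	slot->async_tx.phys = dma_desc_pool      + i * MV_XOR_SLOT_SIZE
 *
 * which is why num_descs_in_pool is plat_data->pool_size / MV_XOR_SLOT_SIZE.
 */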

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : 0);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(unlikely(len > MV_XOR_MAX_BYTE_COUNT));

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_is_complete - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 */
static enum dma_status mv_xor_is_complete(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  dma_cookie_t *done,
					  dma_cookie_t *used)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
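
/*
 * Usage sketch (illustration only, not part of the driver): the self-tests
 * further down drive the channel through its dma_device operations
 * directly; an ordinary client would go through the generic dmaengine
 * wrappers instead.  Roughly, assuming a channel has already been obtained
 * from the dmaengine core and the buffers have been DMA-mapped:
 *
 *	struct dma_device *dev = chan->device;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dev->device_prep_dma_memcpy(chan, dest_dma, src_dma, len, 0);
 *	cookie = tx->tx_submit(tx);          (ends up in mv_xor_tx_submit)
 *	dma_async_issue_pending(chan);       (ends up in mv_xor_issue_pending)
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *						!= DMA_SUCCESS)
 *		cpu_relax();                 (polls mv_xor_is_complete)
 */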

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config 0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause 0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr 0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			/* free the pages we did get before bailing out */
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan, NULL) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;


	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_is_tx_complete = mv_xor_is_complete;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	INIT_RCU_HEAD(&mv_chan->common.rcu);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
		   "( %s%s%s%s)\n",
		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
"intr " : ""); 1240 1241 dma_async_device_register(dma_dev); 1242 goto out; 1243 1244 err_free_dma: 1245 dma_free_coherent(&adev->pdev->dev, plat_data->pool_size, 1246 adev->dma_desc_pool_virt, adev->dma_desc_pool); 1247 out: 1248 return ret; 1249 } 1250 1251 static void 1252 mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp, 1253 struct mbus_dram_target_info *dram) 1254 { 1255 void __iomem *base = msp->xor_base; 1256 u32 win_enable = 0; 1257 int i; 1258 1259 for (i = 0; i < 8; i++) { 1260 writel(0, base + WINDOW_BASE(i)); 1261 writel(0, base + WINDOW_SIZE(i)); 1262 if (i < 4) 1263 writel(0, base + WINDOW_REMAP_HIGH(i)); 1264 } 1265 1266 for (i = 0; i < dram->num_cs; i++) { 1267 struct mbus_dram_window *cs = dram->cs + i; 1268 1269 writel((cs->base & 0xffff0000) | 1270 (cs->mbus_attr << 8) | 1271 dram->mbus_dram_target_id, base + WINDOW_BASE(i)); 1272 writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i)); 1273 1274 win_enable |= (1 << i); 1275 win_enable |= 3 << (16 + (2 * i)); 1276 } 1277 1278 writel(win_enable, base + WINDOW_BAR_ENABLE(0)); 1279 writel(win_enable, base + WINDOW_BAR_ENABLE(1)); 1280 } 1281 1282 static struct platform_driver mv_xor_driver = { 1283 .probe = mv_xor_probe, 1284 .remove = mv_xor_remove, 1285 .driver = { 1286 .owner = THIS_MODULE, 1287 .name = MV_XOR_NAME, 1288 }, 1289 }; 1290 1291 static int mv_xor_shared_probe(struct platform_device *pdev) 1292 { 1293 struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data; 1294 struct mv_xor_shared_private *msp; 1295 struct resource *res; 1296 1297 dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n"); 1298 1299 msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL); 1300 if (!msp) 1301 return -ENOMEM; 1302 1303 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1304 if (!res) 1305 return -ENODEV; 1306 1307 msp->xor_base = devm_ioremap(&pdev->dev, res->start, 1308 res->end - res->start + 1); 1309 if (!msp->xor_base) 1310 return -EBUSY; 1311 1312 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1313 if (!res) 1314 return -ENODEV; 1315 1316 msp->xor_high_base = devm_ioremap(&pdev->dev, res->start, 1317 res->end - res->start + 1); 1318 if (!msp->xor_high_base) 1319 return -EBUSY; 1320 1321 platform_set_drvdata(pdev, msp); 1322 1323 /* 1324 * (Re-)program MBUS remapping windows if we are asked to. 

static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     res->end - res->start + 1);
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  res->end - res->start + 1);
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};


static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");