/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

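/*
 * The interrupt cause and mask registers are shared by all channels of
 * the XOR engine; each channel owns a 16-bit slice, hence the
 * (chan->idx * 16) shifts used by the helpers above and below.  The
 * helper below only acknowledges the normal completion causes (end of
 * descriptor, end of chain, stopped) for this channel.
 */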
static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

/**
 * mv_xor_free_slots - flag a descriptor slot for reuse
 * @mv_chan: XOR channel the slot belongs to
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slot_used = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on the new
 * chain headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

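/*
 * Descriptor lifecycle: a submitted descriptor sits on mv_chan->chain
 * until the hardware has moved past it.  When it completes, the cleanup
 * path either frees its slot immediately (if the client has already
 * acked it) or parks it on mv_chan->completed_slots so the client can
 * still attach dependent operations; parked slots are reclaimed later
 * by mv_xor_clean_completed_slots().
 */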
static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_xor_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	int retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found start searching
	 * from the beginning of the list
	 */
retry:
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {

		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slot_used) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;
			continue;
		}

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);

		iter->slot_used = 1;
		INIT_LIST_HEAD(&iter->chain_node);
		iter->async_tx.cookie = -EBUSY;
		mv_chan->last_used = iter;
		mv_desc_clear_next_desc(iter);

		return iter;

	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
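/*
 * For reference, a minimal sketch of how a dmaengine client might drive
 * one of these channels (client-side names such as my_done_callback are
 * illustrative, not part of this driver):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *
 *	// srcs[] and dest are dma_addr_t values the client mapped itself
 *	tx = chan->device->device_prep_dma_xor(chan, dest, srcs, src_cnt,
 *						len, DMA_PREP_INTERRUPT);
 *	tx->callback = my_done_callback;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	// later: dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *
 * In practice raid/async_tx users go through the async_tx API
 * (async_xor() and friends) rather than calling the prep routines
 * directly.
 */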
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

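/*
 * The channel's software descriptors are carved out of a single
 * coherent pool of MV_XOR_POOL_SIZE bytes allocated at channel setup:
 * each slot owns one MV_XOR_SLOT_SIZE chunk of that pool for its
 * hardware descriptor, so at most MV_XOR_POOL_SIZE / MV_XOR_SLOT_SIZE
 * descriptors can be outstanding per channel.
 */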
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %zu dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	sw_desc = mv_xor_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

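/*
 * Release every software descriptor owned by the channel.  Any
 * descriptor still sitting on the active chain or on completed_slots at
 * this point is counted and reported, since freeing it while the client
 * may still care about it indicates a driver or client bug.
 */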
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_xor_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	WARN_ON(1);
}

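/*
 * Hard interrupt handling is kept minimal: decode errors are logged
 * (anything more serious dumps the registers and WARNs), the completion
 * causes for this channel are acknowledged, and the actual descriptor
 * cleanup is deferred to the channel tasklet, which runs
 * mv_xor_slot_cleanup() under the channel lock.
 */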
static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

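/*
 * XOR self-test: each source page is filled with a distinct one-bit
 * pattern (1 << src_idx), so after the XOR every destination byte must
 * equal cmp_byte, the XOR of those patterns, replicated into cmp_word.
 * With the default of 4 sources that is 0x01 ^ 0x02 ^ 0x04 ^ 0x08 =
 * 0x0f, i.e. cmp_word == 0x0f0f0f0f.
 */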
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

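/*
 * Bring up one XOR channel: allocate the per-channel state, map the
 * dummy source/destination buffers used by DMA_INTERRUPT descriptors,
 * allocate the coherent descriptor pool, hook up the dmaengine
 * callbacks and the interrupt, run the memcpy/xor self-tests for the
 * advertised capabilities, and finally register the channel with the
 * dmaengine core.
 */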
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

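/*
 * For orientation, a rough sketch of the device-tree layout the probe
 * path below expects.  Node names, addresses, interrupt numbers and the
 * clock phandle here are illustrative; only the compatible string, the
 * two register ranges and the per-channel "dmacap,*" / "interrupts"
 * properties are actually consumed by this driver:
 *
 *	xor@60800 {
 *		compatible = "marvell,orion-xor";
 *		reg = <0x60800 0x100>, <0x60a00 0x100>;
 *		clocks = <&gateclk 22>;
 *
 *		xor00 {
 *			interrupts = <51>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *		};
 *		xor01 {
 *			interrupts = <52>;
 *			dmacap,memcpy;
 *			dmacap,xor;
 *			dmacap,interrupt;
 *		};
 *	};
 */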
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table	= of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");