/*
 * Renesas USB driver
 *
 * Copyright (C) 2011 Renesas Solutions Corp.
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/scatterlist.h>
#include "common.h"
#include "pipe.h"

#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
#define usbhsf_get_d0fifo(p)	(&((p)->fifo_info.d0fifo))
#define usbhsf_get_d1fifo(p)	(&((p)->fifo_info.d1fifo))
#define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)

#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */

/*
 * packet initialize
 */
void usbhs_pkt_init(struct usbhs_pkt *pkt)
{
	INIT_LIST_HEAD(&pkt->node);
}

/*
 * packet control function
 */
static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
	struct device *dev = usbhs_priv_to_dev(priv);

	dev_err(dev, "null handler\n");

	return -EINVAL;
}

static struct usbhs_pkt_handle usbhsf_null_handler = {
	.prepare = usbhsf_null_handle,
	.try_run = usbhsf_null_handle,
};

void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
		    void (*done)(struct usbhs_priv *priv,
				 struct usbhs_pkt *pkt),
		    void *buf, int len, int zero, int sequence)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	unsigned long flags;

	if (!done) {
		dev_err(dev, "no done function\n");
		return;
	}

	/******************** spin lock ********************/
	usbhs_lock(priv, flags);

	if (!pipe->handler) {
		dev_err(dev, "no handler function\n");
		pipe->handler = &usbhsf_null_handler;
	}

	list_move_tail(&pkt->node, &pipe->list);

	/*
	 * each pkt must hold its own handler,
	 * because the handler may be changed at run time:
	 * dma handler -> pio handler.
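	 * (usbhsf_dma_prepare_push(), for example, falls back to the
	 *  PIO push handler when DMA cannot be used)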
	 */
	pkt->pipe = pipe;
	pkt->buf = buf;
	pkt->handler = pipe->handler;
	pkt->length = len;
	pkt->zero = zero;
	pkt->actual = 0;
	pkt->done = done;
	pkt->sequence = sequence;

	usbhs_unlock(priv, flags);
	/******************** spin unlock ******************/
}

static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
	list_del_init(&pkt->node);
}

static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
{
	if (list_empty(&pipe->list))
		return NULL;

	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo);
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo);
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt);
#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	unsigned long flags;

	/******************** spin lock ********************/
	usbhs_lock(priv, flags);

	usbhs_pipe_disable(pipe);

	if (!pkt)
		pkt = __usbhsf_pkt_get(pipe);

	if (pkt) {
		struct dma_chan *chan = NULL;

		if (fifo)
			chan = usbhsf_dma_chan_get(fifo, pkt);
		if (chan) {
			dmaengine_terminate_all(chan);
			usbhsf_fifo_clear(pipe, fifo);
			usbhsf_dma_unmap(pkt);
		}

		__usbhsf_pkt_del(pkt);
	}

	if (fifo)
		usbhsf_fifo_unselect(pipe, fifo);

	usbhs_unlock(priv, flags);
	/******************** spin unlock ******************/

	return pkt;
}

enum {
	USBHSF_PKT_PREPARE,
	USBHSF_PKT_TRY_RUN,
	USBHSF_PKT_DMA_DONE,
};

static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pkt *pkt;
	struct device *dev = usbhs_priv_to_dev(priv);
	int (*func)(struct usbhs_pkt *pkt, int *is_done);
	unsigned long flags;
	int ret = 0;
	int is_done = 0;

	/******************** spin lock ********************/
	usbhs_lock(priv, flags);

	pkt = __usbhsf_pkt_get(pipe);
	if (!pkt)
		goto __usbhs_pkt_handler_end;

	switch (type) {
	case USBHSF_PKT_PREPARE:
		func = pkt->handler->prepare;
		break;
	case USBHSF_PKT_TRY_RUN:
		func = pkt->handler->try_run;
		break;
	case USBHSF_PKT_DMA_DONE:
		func = pkt->handler->dma_done;
		break;
	default:
		dev_err(dev, "unknown pkt handler\n");
		goto __usbhs_pkt_handler_end;
	}

	ret = func(pkt, &is_done);

	if (is_done)
		__usbhsf_pkt_del(pkt);

__usbhs_pkt_handler_end:
	usbhs_unlock(priv, flags);
	/******************** spin unlock ******************/

	if (is_done) {
		pkt->done(priv, pkt);
		usbhs_pkt_start(pipe);
	}

	return ret;
}

void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}

/*
 * irq enable/disable function
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->status |= status;				\
		else							\
			mod->status &= ~status;				\
		usbhs_irq_callback_update(priv, mod);			\
	})

static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	/*
	 * The DCP pipe can NOT use the "ready" interrupt for "send";
	 * it should use the "empty" interrupt.
	 * see
	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
	 *
	 * On the other hand, a normal pipe can use the "ready" interrupt
	 * for "send" even though it is single/double buffered.
	 */
	if (usbhs_pipe_is_dcp(pipe))
		usbhsf_irq_empty_ctrl(pipe, enable);
	else
		usbhsf_irq_ready_ctrl(pipe, enable);
}

static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}

/*
 * FIFO ctrl
 */
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
				   struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}

static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	int timeout = 1024;

	do {
		/* The FIFO port is accessible */
		if (usbhs_read(priv, fifo->ctr) & FRDY)
			return 0;

		udelay(10);
	} while (timeout--);

	return -EBUSY;
}

static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	if (!usbhs_pipe_is_dcp(pipe))
		usbhsf_fifo_barrier(priv, fifo);

	usbhs_write(priv, fifo->ctr, BCLR);
}

static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}

static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_pipe_select_fifo(pipe, NULL);
	usbhs_write(priv, fifo->sel, 0);
}

static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below */
	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
		usbhs_write(priv, fifo->sel, base);
	else
		usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}

/*
 * DCP status stage
 */
static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_send_terminator(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0) {
		dev_err(dev, "%s() failed\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
	usbhsf_fifo_clear(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_rx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;
}

static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
		usbhsf_tx_irq_ctrl(pipe, 0);
	else
		usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->actual = pkt->length;
	*is_done = 1;

	return 0;
}

struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
	.prepare = usbhs_dcp_dir_switch_to_write,
	.try_run = usbhs_dcp_dir_switch_done,
};

struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
	.prepare = usbhs_dcp_dir_switch_to_read,
	.try_run = usbhs_dcp_dir_switch_done,
};

/*
 * DCP data stage (push)
 */
static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	/*
	 * change handler to PIO push
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
	.prepare = usbhsf_dcp_data_stage_try_push,
};

/*
 * DCP data stage (pop)
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * prepare pop for DCP should
	 *  - change DCP direction,
	 *  - clear fifo
	 *  - DATA1
	 */
	usbhs_pipe_disable(pipe);

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	usbhsf_fifo_select(pipe, fifo, 0);
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * change handler to PIO pop
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->prepare(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
	.prepare = usbhsf_dcp_data_stage_prepare_pop,
};

/*
 * PIO push handler
 */
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int total_len;
	int i, ret, len;
	int is_short;

	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0)
		return 0;

	ret = usbhs_pipe_is_accessible(pipe);
	if (ret < 0) {
		/* inaccessible pipe is not an error */
		ret = 0;
		goto usbhs_fifo_write_busy;
	}

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_write_busy;

	buf = pkt->buf + pkt->actual;
	len = pkt->length - pkt->actual;
	len = min(len, maxp);
	total_len = len;
	is_short = total_len < maxp;

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		iowrite32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* write the remaining bytes */
	for (i = 0; i < len; i++)
		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));

	/*
	 * variable update
	 */
	pkt->actual += total_len;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is still data to send */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	/*
	 * pipe/irq handling
	 */
	if (is_short)
		usbhsf_send_terminator(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, !*is_done);
	usbhs_pipe_running(pipe, !*is_done);
	usbhs_pipe_enable(pipe);

	dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	/*
	 * Transmission end
	 */
	if (*is_done) {
		if (usbhs_pipe_is_dcp(pipe))
			usbhs_dcp_control_transfer_done(pipe);
	}

	usbhsf_fifo_unselect(pipe, fifo);

	return 0;

usbhs_fifo_write_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * pipe is busy.
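	 * (the FIFO was not yet accessible; the BEMP/BRDY interrupt will
	 *  invoke this handler's try_run again)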
	 * retry in interrupt
	 */
	usbhsf_tx_irq_ctrl(pipe, 1);
	usbhs_pipe_running(pipe, 1);

	return ret;
}

static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	if (usbhs_pipe_is_running(pkt->pipe))
		return 0;

	return usbhsf_pio_try_push(pkt, is_done);
}

struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
	.prepare = usbhsf_pio_prepare_push,
	.try_run = usbhsf_pio_try_push,
};

/*
 * PIO pop handler
 */
static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_running(pipe))
		return 0;

	/*
	 * enable the pipe to prepare packet receive
	 */
	usbhs_pipe_data_sequence(pipe, pkt->sequence);
	pkt->sequence = -1; /* -1 sequence will be ignored */

	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
	usbhs_pipe_enable(pipe);
	usbhs_pipe_running(pipe, 1);
	usbhsf_rx_irq_ctrl(pipe, 1);

	return 0;
}

static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	u32 data = 0;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int rcv_len, len;
	int i, ret;
	int total_len = 0;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		return 0;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_read_busy;

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

	buf = pkt->buf + pkt->actual;
	len = pkt->length - pkt->actual;
	len = min(len, rcv_len);
	total_len = len;

	/*
	 * update the actual length first here, since it decides whether
	 * the pipe is disabled.
	 * if this pipe keeps the BUF status and all data were popped,
	 * then the next interrupt/token will be issued again.
	 */
	pkt->actual += total_len;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (total_len < maxp)) {		/* short packet */
		*is_done = 1;
		usbhsf_rx_irq_ctrl(pipe, 0);
		usbhs_pipe_running(pipe, 0);
		usbhs_pipe_disable(pipe);	/* disable pipe first */
	}

	/*
	 * Buffer clear if Zero-Length packet
	 *
	 * see
	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
	 */
	if (0 == rcv_len) {
		pkt->zero = 1;
		usbhsf_fifo_clear(pipe, fifo);
		goto usbhs_fifo_read_end;
	}

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		ioread32_rep(addr, buf, len / 4);
		len %= 4;
		buf += total_len - len;
	}

	/* read the remaining bytes */
	for (i = 0; i < len; i++) {
		if (!(i & 0x03))
			data = ioread32(addr);

		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
	}

usbhs_fifo_read_end:
	dev_dbg(dev, " recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	/*
	 * Transmission end
	 */
	if (*is_done) {
		if (usbhs_pipe_is_dcp(pipe))
			usbhs_dcp_control_transfer_done(pipe);
	}

usbhs_fifo_read_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	return ret;
}

struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_pio_try_pop,
};

/*
 * DCP control stage handler
 */
static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
{
	usbhs_dcp_control_transfer_done(pkt->pipe);

	*is_done = 1;

	return 0;
}

struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};

/*
 * DMA fifo functions
 */
static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
					    struct usbhs_pkt *pkt)
{
	if (&usbhs_fifo_dma_push_handler == pkt->handler)
		return fifo->tx_chan;

	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
		return fifo->rx_chan;

	return NULL;
}

static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
					      struct usbhs_pkt *pkt)
{
	struct usbhs_fifo *fifo;

	/* DMA :: D0FIFO */
	fifo = usbhsf_get_d0fifo(priv);
	if (usbhsf_dma_chan_get(fifo, pkt) &&
	    !usbhsf_fifo_is_busy(fifo))
		return fifo;

	/* DMA :: D1FIFO */
	fifo = usbhsf_get_d1fifo(priv);
	if (usbhsf_dma_chan_get(fifo, pkt) &&
	    !usbhsf_fifo_is_busy(fifo))
		return fifo;

	return NULL;
}

#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}

static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);

	return info->dma_map_ctrl(pkt, map);
}

static void usbhsf_dma_complete(void *arg);
static void xfer_work(struct work_struct *work)
{
	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_transfer_direction dir;

	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;

	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
					   pkt->trans, dir,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return;

	desc->callback = usbhsf_dma_complete;
	desc->callback_param = pipe;

	if (dmaengine_submit(desc) < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		return;
	}

	dev_dbg(dev, " %s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhs_pipe_running(pipe, 1);
	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
	usbhs_pipe_enable(pipe);
	usbhsf_dma_start(pipe, fifo);
	dma_async_issue_pending(chan);
}

/*
 * DMA push handler
 */
static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len = pkt->length - pkt->actual;
	int ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
	    usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_push;

	if (len & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_push;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_push;

	/* return at this time if the pipe is running */
	if (usbhs_pipe_is_running(pipe))
		return 0;

	/* get an available DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_push;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_push;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_push_unmap;

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_push_unmap:
	usbhsf_dma_unmap(pkt);
usbhsf_pio_prepare_push:
	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_push_handler;

	return pkt->handler->prepare(pkt, is_done);
}

static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);

	pkt->actual += pkt->trans;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there is still data to send */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet? */

	usbhs_pipe_running(pipe, !*is_done);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	if (!*is_done) {
		/* change handler to PIO */
		pkt->handler = &usbhs_fifo_pio_push_handler;
		return pkt->handler->try_run(pkt, is_done);
	}

	return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare = usbhsf_dma_prepare_push,
	.dma_done = usbhsf_dma_push_done,
};

/*
 * DMA pop handler
 */
static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo;
	int len, ret;

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	if (usbhs_pipe_is_dcp(pipe))
		goto usbhsf_pio_prepare_pop;

	/* get an available DMA fifo */
	fifo = usbhsf_get_dma_fifo(priv, pkt);
	if (!fifo)
		goto usbhsf_pio_prepare_pop;

	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop;

	/* use PIO if packet is less than pio_dma_border */
	len = usbhsf_fifo_rcv_len(priv, fifo);
	len = min(pkt->length - pkt->actual, len);
	if (len & 0x7) /* 8byte alignment */
		goto usbhsf_pio_prepare_pop_unselect;

	if (len < usbhs_get_dparam(priv, pio_dma_border))
		goto usbhsf_pio_prepare_pop_unselect;

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	if (usbhsf_dma_map(pkt) < 0)
		goto usbhsf_pio_prepare_pop_unselect;

	/* DMA */

	/*
	 * usbhs_fifo_dma_pop_handler :: prepare
	 * enabled the irq that brought us here,
	 * but it is no longer needed for DMA; disable it.
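	 * (when more data is expected, usbhsf_dma_pop_done() re-enables it
	 *  via usbhsf_prepare_pop())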
	 */
	usbhsf_rx_irq_ctrl(pipe, 0);

	pkt->trans = len;

	INIT_WORK(&pkt->work, xfer_work);
	schedule_work(&pkt->work);

	return 0;

usbhsf_pio_prepare_pop_unselect:
	usbhsf_fifo_unselect(pipe, fifo);
usbhsf_pio_prepare_pop:

	/*
	 * change handler to PIO
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->try_run(pkt, is_done);
}

static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
		usbhs_pipe_running(pipe, 0);
	} else {
		/* re-enable */
		usbhs_pipe_running(pipe, 0);
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}

struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_dma_try_pop,
	.dma_done = usbhsf_dma_pop_done
};

/*
 * DMA setting
 */
static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
{
	struct sh_dmae_slave *slave = param;

	/*
	 * FIXME
	 *
	 * usbhs doesn't recognize id = 0 as valid DMA
	 */
	if (0 == slave->shdma_slave.slave_id)
		return false;

	chan->private = slave;

	return true;
}

static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
{
	if (fifo->tx_chan)
		dma_release_channel(fifo->tx_chan);
	if (fifo->rx_chan)
		dma_release_channel(fifo->rx_chan);

	fifo->tx_chan = NULL;
	fifo->rx_chan = NULL;
}

static void usbhsf_dma_init(struct usbhs_priv *priv,
			    struct usbhs_fifo *fifo)
{
	struct device *dev = usbhs_priv_to_dev(priv);
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->tx_slave);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
					    &fifo->rx_slave);

	if (fifo->tx_chan || fifo->rx_chan)
		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
			fifo->name,
			fifo->tx_chan ? "[TX]" : " ",
			fifo->rx_chan ? "[RX]" : " ");
}

/*
 * irq functions
 */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
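	 * (bit 0 of BEMPSTS corresponds to the DCP, i.e. pipe 0)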
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}

static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->brdysts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
	}

	return 0;
}

static void usbhsf_dma_complete(void *arg)
{
	struct usbhs_pipe *pipe = arg;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
	if (ret < 0)
		dev_err(dev, "dma_complete run_error %d : %d\n",
			usbhs_pipe_number(pipe), ret);
}

/*
 * fifo init
 */
void usbhs_fifo_init(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
	struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
	struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);

	mod->irq_empty = usbhsf_irq_empty;
	mod->irq_ready = usbhsf_irq_ready;
	mod->irq_bempsts = 0;
	mod->irq_brdysts = 0;

	cfifo->pipe = NULL;
	d0fifo->pipe = NULL;
	d1fifo->pipe = NULL;
}

void usbhs_fifo_quit(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);

	mod->irq_empty = NULL;
	mod->irq_ready = NULL;
	mod->irq_bempsts = 0;
	mod->irq_brdysts = 0;
}

int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name = "CFIFO";
	fifo->port = CFIFO;
	fifo->sel = CFIFOSEL;
	fifo->ctr = CFIFOCTR;

	/* D0FIFO */
	fifo = usbhsf_get_d0fifo(priv);
	fifo->name = "D0FIFO";
	fifo->port = D0FIFO;
	fifo->sel = D0FIFOSEL;
	fifo->ctr = D0FIFOCTR;
	fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id);
	fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id);
	usbhsf_dma_init(priv, fifo);

	/* D1FIFO */
	fifo = usbhsf_get_d1fifo(priv);
	fifo->name = "D1FIFO";
	fifo->port = D1FIFO;
	fifo->sel = D1FIFOSEL;
	fifo->ctr = D1FIFOCTR;
	fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id);
	fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id);
	usbhsf_dma_init(priv, fifo);

	return 0;
}

void usbhs_fifo_remove(struct usbhs_priv *priv)
{
	usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
	usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
}
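
/*
 * Illustrative usage sketch (not part of the driver): how a mod driver is
 * expected to queue and start a transfer with the helpers above.  The real
 * callers live in mod_gadget.c / mod_host.c; "my_done" and "my_pkt" below
 * are made-up names, and the pipe's ->handler is assumed to have been set
 * up beforehand (e.g. to usbhs_fifo_pio_push_handler).
 *
 *	static void my_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
 *	{
 *		... pkt->actual holds the transferred length ...
 *	}
 *
 *	usbhs_pkt_init(&my_pkt);
 *	usbhs_pkt_push(pipe, &my_pkt, my_done, buf, len, 0, -1);
 *	usbhs_pkt_start(pipe);
 */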