/*
 * c67x00-sched.c: Cypress C67X00 USB Host Controller Driver - TD scheduling
 *
 * Copyright (C) 2006-2008 Barco N.V.
 *    Derived from the Cypress cy7c67200/300 ezusb linux driver and
 *    based on multiple host controller drivers inside the linux kernel.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA  02110-1301  USA.
 */

#include <linux/kthread.h>

#include "c67x00.h"
#include "c67x00-hcd.h"

/*
 * These are the stages of a control urb; they are kept
 * in both urb->interval and td->privdata.
 */
#define SETUP_STAGE             0
#define DATA_STAGE              1
#define STATUS_STAGE            2

/* -------------------------------------------------------------------------- */

/**
 * struct c67x00_ep_data: Host endpoint data structure
 */
struct c67x00_ep_data {
        struct list_head queue;
        struct list_head node;
        struct usb_host_endpoint *hep;
        struct usb_device *dev;
        u16 next_frame;         /* For int/isoc transactions */
};

/**
 * struct c67x00_td
 *
 * The hardware part is little endian; the SW part uses CPU endianness.
 */
struct c67x00_td {
        /* HW specific part */
        __le16 ly_base_addr;    /* Bytes 0-1 */
        __le16 port_length;     /* Bytes 2-3 */
        u8 pid_ep;              /* Byte 4 */
        u8 dev_addr;            /* Byte 5 */
        u8 ctrl_reg;            /* Byte 6 */
        u8 status;              /* Byte 7 */
        u8 retry_cnt;           /* Byte 8 */
#define TT_OFFSET               2
#define TT_CONTROL              0
#define TT_ISOCHRONOUS          1
#define TT_BULK                 2
#define TT_INTERRUPT            3
        u8 residue;             /* Byte 9 */
        __le16 next_td_addr;    /* Bytes 10-11 */
        /* SW part */
        struct list_head td_list;
        u16 td_addr;
        void *data;
        struct urb *urb;
        unsigned long privdata;

        /* These are needed for handling the toggle bits:
         * an urb can be dequeued while a td is in progress;
         * after checking the td, the toggle bit might need to
         * be fixed */
        struct c67x00_ep_data *ep_data;
        unsigned int pipe;
};
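
/*
 * Layout note: c67x00_send_td()/c67x00_parse_td() below copy exactly
 * CY_TD_SIZE (12) bytes between SIE memory and the start of this struct,
 * so the HW specific fields must stay first, in this order, with no
 * padding in between (all of them are naturally aligned, so the compiler
 * adds none).  The SW part is host-side bookkeeping only and is never
 * written to the chip.
 */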

struct c67x00_urb_priv {
        struct list_head hep_node;
        struct urb *urb;
        int port;
        int cnt;                /* packet number for isoc */
        int status;
        struct c67x00_ep_data *ep_data;
};

#define td_udev(td)     ((td)->ep_data->dev)

#define CY_TD_SIZE              12

#define TD_PIDEP_OFFSET         0x04
#define TD_PIDEPMASK_PID        0xF0
#define TD_PIDEPMASK_EP         0x0F
#define TD_PORTLENMASK_DL       0x03FF
#define TD_PORTLENMASK_PN       0xC000

#define TD_STATUS_OFFSET        0x07
#define TD_STATUSMASK_ACK       0x01
#define TD_STATUSMASK_ERR       0x02
#define TD_STATUSMASK_TMOUT     0x04
#define TD_STATUSMASK_SEQ       0x08
#define TD_STATUSMASK_SETUP     0x10
#define TD_STATUSMASK_OVF       0x20
#define TD_STATUSMASK_NAK       0x40
#define TD_STATUSMASK_STALL     0x80

#define TD_ERROR_MASK           (TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
                                 TD_STATUSMASK_STALL)

#define TD_RETRYCNT_OFFSET      0x08
#define TD_RETRYCNTMASK_ACT_FLG 0x10
#define TD_RETRYCNTMASK_TX_TYPE 0x0C
#define TD_RETRYCNTMASK_RTY_CNT 0x03

#define TD_RESIDUE_OVERFLOW     0x80

#define TD_PID_IN               0x90

/* Residue: signed 8 bits; negative means overflow, positive underflow */
#define td_residue(td)          ((__s8)(td->residue))
#define td_ly_base_addr(td)     (__le16_to_cpu((td)->ly_base_addr))
#define td_port_length(td)      (__le16_to_cpu((td)->port_length))
#define td_next_td_addr(td)     (__le16_to_cpu((td)->next_td_addr))

#define td_active(td)           ((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
#define td_length(td)           (td_port_length(td) & TD_PORTLENMASK_DL)

#define td_sequence_ok(td)      (!td->status || \
                                 (!(td->status & TD_STATUSMASK_SEQ) == \
                                  !(td->ctrl_reg & SEQ_SEL)))

#define td_acked(td)            (!td->status || \
                                 (td->status & TD_STATUSMASK_ACK))
#define td_actual_bytes(td)     (td_length(td) - td_residue(td))

/* -------------------------------------------------------------------------- */

#ifdef DEBUG

/**
 * dbg_td - Dump the contents of the TD
 */
static void dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg)
{
        struct device *dev = c67x00_hcd_dev(c67x00);

        dev_dbg(dev, "### %s at 0x%04x\n", msg, td->td_addr);
        dev_dbg(dev, "urb:          0x%p\n", td->urb);
        dev_dbg(dev, "endpoint:       %4d\n", usb_pipeendpoint(td->pipe));
        dev_dbg(dev, "pipeout:        %4d\n", usb_pipeout(td->pipe));
        dev_dbg(dev, "ly_base_addr: 0x%04x\n", td_ly_base_addr(td));
        dev_dbg(dev, "port_length:  0x%04x\n", td_port_length(td));
        dev_dbg(dev, "pid_ep:         0x%02x\n", td->pid_ep);
        dev_dbg(dev, "dev_addr:       0x%02x\n", td->dev_addr);
        dev_dbg(dev, "ctrl_reg:       0x%02x\n", td->ctrl_reg);
        dev_dbg(dev, "status:         0x%02x\n", td->status);
        dev_dbg(dev, "retry_cnt:      0x%02x\n", td->retry_cnt);
        dev_dbg(dev, "residue:        0x%02x\n", td->residue);
        dev_dbg(dev, "next_td_addr: 0x%04x\n", td_next_td_addr(td));
        dev_dbg(dev, "data:");
        print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
                       td->data, td_length(td), 1);
}
#else                           /* DEBUG */

static inline void
dbg_td(struct c67x00_hcd *c67x00, struct c67x00_td *td, char *msg) { }

#endif                          /* DEBUG */

/* -------------------------------------------------------------------------- */
/* Helper functions */

static inline u16 c67x00_get_current_frame_number(struct c67x00_hcd *c67x00)
{
        return c67x00_ll_husb_get_frame(c67x00->sie) & HOST_FRAME_MASK;
}

/**
 * frame_add
 * Software wraparound for frame numbers.
 */
static inline u16 frame_add(u16 a, u16 b)
{
        return (a + b) & HOST_FRAME_MASK;
}

/**
 * frame_after - is frame a after frame b
 */
static inline int frame_after(u16 a, u16 b)
{
        return ((HOST_FRAME_MASK + a - b) & HOST_FRAME_MASK) <
            (HOST_FRAME_MASK / 2);
}

/**
 * frame_after_eq - is frame a after or equal to frame b
 */
static inline int frame_after_eq(u16 a, u16 b)
{
        return ((HOST_FRAME_MASK + 1 + a - b) & HOST_FRAME_MASK) <
            (HOST_FRAME_MASK / 2);
}

/* -------------------------------------------------------------------------- */
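
/*
 * Worked example of the wraparound comparisons above, assuming the usual
 * 11-bit frame counter (HOST_FRAME_MASK == 0x7FF, frames 0..2047):
 *
 *   frame_after(1, 2046)  -> (2047 + 1 - 2046) & 2047 = 2    <  1023 -> true
 *   frame_after(2046, 1)  -> (2047 + 2046 - 1) & 2047 = 2044 >= 1023 -> false
 *   frame_after(5, 5)     -> 2047 & 2047 = 2047              -> false
 *   frame_after_eq(5, 5)  -> 2048 & 2047 = 0                 -> true
 *
 * i.e. "after" means "less than half the counter range ahead", the
 * standard serial-number comparison trick.
 */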

/**
 * c67x00_release_urb - remove link from all tds to this urb
 * Disconnects the urb from its tds, so that it can be given back.
 * pre: urb->hcpriv != NULL
 */
static void c67x00_release_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
        struct c67x00_td *td;
        struct c67x00_urb_priv *urbp;

        BUG_ON(!urb);

        c67x00->urb_count--;

        if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
                c67x00->urb_iso_count--;
                if (c67x00->urb_iso_count == 0)
                        c67x00->max_frame_bw = MAX_FRAME_BW_STD;
        }

        /* TODO: this might not be very efficient when we've got many urbs!
         * Alternatives:
         *   * only clear when needed
         *   * keep a list of tds with each urbp
         */
        list_for_each_entry(td, &c67x00->td_list, td_list)
                if (urb == td->urb)
                        td->urb = NULL;

        urbp = urb->hcpriv;
        urb->hcpriv = NULL;
        list_del(&urbp->hep_node);
        kfree(urbp);
}

/* -------------------------------------------------------------------------- */

static struct c67x00_ep_data *
c67x00_ep_data_alloc(struct c67x00_hcd *c67x00, struct urb *urb)
{
        struct usb_host_endpoint *hep = urb->ep;
        struct c67x00_ep_data *ep_data;
        int type;

        c67x00->current_frame = c67x00_get_current_frame_number(c67x00);

        /* Check if endpoint already has a c67x00_ep_data struct allocated */
        if (hep->hcpriv) {
                ep_data = hep->hcpriv;
                if (frame_after(c67x00->current_frame, ep_data->next_frame))
                        ep_data->next_frame =
                            frame_add(c67x00->current_frame, 1);
                return hep->hcpriv;
        }

        /* Allocate and initialize a new c67x00 endpoint data structure */
        ep_data = kzalloc(sizeof(*ep_data), GFP_ATOMIC);
        if (!ep_data)
                return NULL;

        INIT_LIST_HEAD(&ep_data->queue);
        INIT_LIST_HEAD(&ep_data->node);
        ep_data->hep = hep;

        /* Hold a reference to udev as long as this endpoint lives;
         * this is needed to possibly fix the data toggle. */
        ep_data->dev = usb_get_dev(urb->dev);
        hep->hcpriv = ep_data;

        /* For ISOC and INT endpoints, start ASAP: */
        ep_data->next_frame = frame_add(c67x00->current_frame, 1);

        /* Add the endpoint data to one of the pipe lists; must be added
           in order of endpoint address */
        type = usb_pipetype(urb->pipe);
        if (list_empty(&ep_data->node)) {
                list_add(&ep_data->node, &c67x00->list[type]);
        } else {
                struct c67x00_ep_data *prev;

                list_for_each_entry(prev, &c67x00->list[type], node) {
                        if (prev->hep->desc.bEndpointAddress >
                            hep->desc.bEndpointAddress) {
                                list_add(&ep_data->node, prev->node.prev);
                                break;
                        }
                }
        }

        return ep_data;
}
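
/*
 * Teardown counterpart of c67x00_ep_data_alloc().  It refuses to free the
 * endpoint data while urbs are still queued on it (-EBUSY); the caller,
 * c67x00_endpoint_disable() below, uses that return value to keep kicking
 * the scheduler until the queue has drained.
 */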
static int c67x00_ep_data_free(struct usb_host_endpoint *hep)
{
        struct c67x00_ep_data *ep_data = hep->hcpriv;

        if (!ep_data)
                return 0;

        if (!list_empty(&ep_data->queue))
                return -EBUSY;

        usb_put_dev(ep_data->dev);
        list_del(&ep_data->queue);
        list_del(&ep_data->node);

        kfree(ep_data);
        hep->hcpriv = NULL;

        return 0;
}

void c67x00_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
        struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
        unsigned long flags;

        if (!list_empty(&ep->urb_list))
                dev_warn(c67x00_hcd_dev(c67x00), "error: urb list not empty\n");

        spin_lock_irqsave(&c67x00->lock, flags);

        /* loop waiting for all transfers in the endpoint queue to complete */
        while (c67x00_ep_data_free(ep)) {
                /* Drop the lock so we can sleep waiting for the hardware */
                spin_unlock_irqrestore(&c67x00->lock, flags);

                /* It could happen that we reinitialize this completion while
                 * somebody was waiting for it.  The timeout and the while
                 * loop handle such cases, but this might be improved. */
                INIT_COMPLETION(c67x00->endpoint_disable);
                c67x00_sched_kick(c67x00);
                wait_for_completion_timeout(&c67x00->endpoint_disable, 1 * HZ);

                spin_lock_irqsave(&c67x00->lock, flags);
        }

        spin_unlock_irqrestore(&c67x00->lock, flags);
}

/* -------------------------------------------------------------------------- */

static inline int get_root_port(struct usb_device *dev)
{
        while (dev->parent->parent)
                dev = dev->parent;
        return dev->portnum;
}

int c67x00_urb_enqueue(struct usb_hcd *hcd,
                       struct urb *urb, gfp_t mem_flags)
{
        int ret;
        unsigned long flags;
        struct c67x00_urb_priv *urbp;
        struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
        int port = get_root_port(urb->dev) - 1;

        spin_lock_irqsave(&c67x00->lock, flags);

        /* Make sure host controller is running */
        if (!HC_IS_RUNNING(hcd->state)) {
                ret = -ENODEV;
                goto err_not_linked;
        }

        ret = usb_hcd_link_urb_to_ep(hcd, urb);
        if (ret)
                goto err_not_linked;

        /* Allocate and initialize urb private data */
        urbp = kzalloc(sizeof(*urbp), mem_flags);
        if (!urbp) {
                ret = -ENOMEM;
                goto err_urbp;
        }

        INIT_LIST_HEAD(&urbp->hep_node);
        urbp->urb = urb;
        urbp->port = port;

        urbp->ep_data = c67x00_ep_data_alloc(c67x00, urb);

        if (!urbp->ep_data) {
                ret = -ENOMEM;
                goto err_epdata;
        }

        /* TODO claim bandwidth with usb_claim_bandwidth?
         * also release it somewhere! */

        urb->hcpriv = urbp;

        urb->actual_length = 0; /* Nothing received/transmitted yet */

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                urb->interval = SETUP_STAGE;
                break;
        case PIPE_INTERRUPT:
                break;
        case PIPE_BULK:
                break;
        case PIPE_ISOCHRONOUS:
                if (c67x00->urb_iso_count == 0)
                        c67x00->max_frame_bw = MAX_FRAME_BW_ISO;
                c67x00->urb_iso_count++;
                /* Assume always URB_ISO_ASAP, FIXME */
                if (list_empty(&urbp->ep_data->queue))
                        urb->start_frame = urbp->ep_data->next_frame;
                else {
                        /* Go right after the last one */
                        struct urb *last_urb;

                        last_urb = list_entry(urbp->ep_data->queue.prev,
                                              struct c67x00_urb_priv,
                                              hep_node)->urb;
                        urb->start_frame =
                            frame_add(last_urb->start_frame,
                                      last_urb->number_of_packets *
                                      last_urb->interval);
                }
                urbp->cnt = 0;
                break;
        }

        /* Add the URB to the endpoint queue */
        list_add_tail(&urbp->hep_node, &urbp->ep_data->queue);

        /* If this is the only URB, kick start the controller */
        if (!c67x00->urb_count++)
                c67x00_ll_hpi_enable_sofeop(c67x00->sie);

        c67x00_sched_kick(c67x00);
        spin_unlock_irqrestore(&c67x00->lock, flags);

        return 0;

err_epdata:
        kfree(urbp);
err_urbp:
        usb_hcd_unlink_urb_from_ep(hcd, urb);
err_not_linked:
        spin_unlock_irqrestore(&c67x00->lock, flags);

        return ret;
}
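
/*
 * Dequeue can give the urb back immediately: TDs are rebuilt from the
 * endpoint queues every frame, and c67x00_release_urb() clears the urb
 * pointer in any TD still on the current frame's td_list, so a TD that
 * completes later simply finds td->urb == NULL.
 */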
int c67x00_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
        struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&c67x00->lock, flags);
        rc = usb_hcd_check_unlink_urb(hcd, urb, status);
        if (rc)
                goto done;

        c67x00_release_urb(c67x00, urb);
        usb_hcd_unlink_urb_from_ep(hcd, urb);

        spin_unlock(&c67x00->lock);
        usb_hcd_giveback_urb(hcd, urb, status);
        spin_lock(&c67x00->lock);

        spin_unlock_irqrestore(&c67x00->lock, flags);

        return 0;

done:
        spin_unlock_irqrestore(&c67x00->lock, flags);
        return rc;
}

/* -------------------------------------------------------------------------- */

/*
 * pre: c67x00 locked, urb unlocked
 */
static void
c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
{
        struct c67x00_urb_priv *urbp;

        if (!urb)
                return;

        urbp = urb->hcpriv;
        urbp->status = status;

        list_del_init(&urbp->hep_node);

        c67x00_release_urb(c67x00, urb);
        usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
        /* Drop the lock while calling back: the completion handler may
         * re-enqueue urbs and would otherwise deadlock on c67x00->lock. */
        spin_unlock(&c67x00->lock);
        usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status);
        spin_lock(&c67x00->lock);
}

/* -------------------------------------------------------------------------- */

static int c67x00_claim_frame_bw(struct c67x00_hcd *c67x00, struct urb *urb,
                                 int len, int periodic)
{
        struct c67x00_urb_priv *urbp = urb->hcpriv;
        int bit_time;

        /* According to the C67x00 BIOS user manual, page 3-18,19, the
         * following calculations provide the full speed bit times for
         * a transaction.
         *
         * FS(in)       = 112.5 +  9.36*BC + HOST_DELAY
         * FS(in,iso)   =  90.5 +  9.36*BC + HOST_DELAY
         * FS(out)      = 112.5 +  9.36*BC + HOST_DELAY
         * FS(out,iso)  =  78.4 +  9.36*BC + HOST_DELAY
         * LS(in)       = 802.4 + 75.78*BC + HOST_DELAY
         * LS(out)      = 802.6 + 74.67*BC + HOST_DELAY
         *
         * HOST_DELAY == 106 for the c67200 and c67300.
         */

        /* make calculations in 1/100 bit times to maintain resolution */
        if (urbp->ep_data->dev->speed == USB_SPEED_LOW) {
                /* Low speed pipe */
                if (usb_pipein(urb->pipe))
                        bit_time = 80240 + 7578 * len;
                else
                        bit_time = 80260 + 7467 * len;
        } else {
                /* FS pipes */
                if (usb_pipeisoc(urb->pipe))
                        bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
                else
                        bit_time = 11250;
                bit_time += 936 * len;
        }

        /* Scale back down to integer bit times.  Use a host delay of 106.
         * (this is the only place it is used) */
        bit_time = ((bit_time + 50) / 100) + 106;

        if (unlikely(bit_time + c67x00->bandwidth_allocated >=
                     c67x00->max_frame_bw))
                return -EMSGSIZE;

        if (unlikely(c67x00->next_td_addr + CY_TD_SIZE >=
                     c67x00->td_base_addr + SIE_TD_SIZE))
                return -EMSGSIZE;

        if (unlikely(c67x00->next_buf_addr + len >=
                     c67x00->buf_base_addr + SIE_TD_BUF_SIZE))
                return -EMSGSIZE;

        if (periodic) {
                if (unlikely(bit_time + c67x00->periodic_bw_allocated >=
                             MAX_PERIODIC_BW(c67x00->max_frame_bw)))
                        return -EMSGSIZE;
                c67x00->periodic_bw_allocated += bit_time;
        }

        c67x00->bandwidth_allocated += bit_time;
        return 0;
}

/* -------------------------------------------------------------------------- */
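
/*
 * Worked example for the bandwidth claim above: a 64-byte full-speed bulk
 * packet costs 11250 + 936*64 = 71154 hundredths of a bit time, which
 * scales to (71154 + 50)/100 + 106 = 818 bit times of the per-frame
 * budget (c67x00->max_frame_bw).  A 12 Mbit/s frame is 12000 bit times,
 * so roughly 14 such transactions fit in one frame.
 */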

/**
 * td_addr and buf_addr must be word aligned
 */
static int c67x00_create_td(struct c67x00_hcd *c67x00, struct urb *urb,
                            void *data, int len, int pid, int toggle,
                            unsigned long privdata)
{
        struct c67x00_td *td;
        struct c67x00_urb_priv *urbp = urb->hcpriv;
        const __u8 active_flag = 1, retry_cnt = 1;
        __u8 cmd = 0;
        int tt = 0;

        if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
                                  || usb_pipeint(urb->pipe)))
                return -EMSGSIZE;       /* Not really an error, but expected */

        td = kzalloc(sizeof(*td), GFP_ATOMIC);
        if (!td)
                return -ENOMEM;

        td->pipe = urb->pipe;
        td->ep_data = urbp->ep_data;

        if ((td_udev(td)->speed == USB_SPEED_LOW) &&
            !(c67x00->low_speed_ports & (1 << urbp->port)))
                cmd |= PREAMBLE_EN;

        switch (usb_pipetype(td->pipe)) {
        case PIPE_ISOCHRONOUS:
                tt = TT_ISOCHRONOUS;
                cmd |= ISO_EN;
                break;
        case PIPE_CONTROL:
                tt = TT_CONTROL;
                break;
        case PIPE_BULK:
                tt = TT_BULK;
                break;
        case PIPE_INTERRUPT:
                tt = TT_INTERRUPT;
                break;
        }

        if (toggle)
                cmd |= SEQ_SEL;

        cmd |= ARM_EN;

        /* SW part */
        td->td_addr = c67x00->next_td_addr;
        c67x00->next_td_addr = c67x00->next_td_addr + CY_TD_SIZE;

        /* HW part */
        td->ly_base_addr = __cpu_to_le16(c67x00->next_buf_addr);
        td->port_length = __cpu_to_le16((c67x00->sie->sie_num << 15) |
                                        (urbp->port << 14) | (len & 0x3FF));
        td->pid_ep = ((pid & 0xF) << TD_PIDEP_OFFSET) |
            (usb_pipeendpoint(td->pipe) & 0xF);
        td->dev_addr = usb_pipedevice(td->pipe) & 0x7F;
        td->ctrl_reg = cmd;
        td->status = 0;
        td->retry_cnt = (tt << TT_OFFSET) | (active_flag << 4) | retry_cnt;
        td->residue = 0;
        td->next_td_addr = __cpu_to_le16(c67x00->next_td_addr);

        /* SW part */
        td->data = data;
        td->urb = urb;
        td->privdata = privdata;

        c67x00->next_buf_addr += (len + 1) & ~0x01;     /* properly align */

        list_add_tail(&td->td_list, &c67x00->td_list);
        return 0;
}

static inline void c67x00_release_td(struct c67x00_td *td)
{
        list_del_init(&td->td_list);
        kfree(td);
}

/* -------------------------------------------------------------------------- */
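
/*
 * The c67x00_add_*_urb() helpers below expand the first urb of each
 * endpoint queue into one TD per (max-)packet for the coming frame.  For
 * an OUT pipe with URB_ZERO_PACKET set, an extra zero-length TD is
 * appended when the transfer length is an exact multiple of the max
 * packet size, as the USB spec requires to terminate such transfers.
 */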

static int c67x00_add_data_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
        int remaining;
        int toggle;
        int pid;
        int ret = 0;
        int maxps;
        int need_empty;

        toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                               usb_pipeout(urb->pipe));
        remaining = urb->transfer_buffer_length - urb->actual_length;

        maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));

        need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
            usb_pipeout(urb->pipe) && !(remaining % maxps);

        while (remaining || need_empty) {
                int len;
                char *td_buf;

                len = (remaining > maxps) ? maxps : remaining;
                if (!len)
                        need_empty = 0;

                pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
                td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
                    remaining;
                ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
                                       DATA_STAGE);
                if (ret)
                        return ret;     /* td wasn't created */

                toggle ^= 1;
                remaining -= len;
                if (usb_pipecontrol(urb->pipe))
                        break;
        }

        return 0;
}
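
/*
 * Control transfers are driven through their stages one frame at a time;
 * the current stage lives in urb->interval (otherwise unused for control
 * pipes), as set up in c67x00_urb_enqueue().  A GET_DESCRIPTOR request
 * thus takes at least three frames: SETUP_STAGE -> DATA_STAGE ->
 * STATUS_STAGE, with the stage advanced in c67x00_handle_successful_td()
 * after each stage's TDs complete.
 */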

/**
 * return 0 in case more bandwidth is available, else errorcode
 */
static int c67x00_add_ctrl_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
        int ret;
        int pid;

        switch (urb->interval) {
        default:
        case SETUP_STAGE:
                ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
                                       8, USB_PID_SETUP, 0, SETUP_STAGE);
                if (ret)
                        return ret;
                urb->interval = SETUP_STAGE;
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                              usb_pipeout(urb->pipe), 1);
                break;
        case DATA_STAGE:
                if (urb->transfer_buffer_length) {
                        ret = c67x00_add_data_urb(c67x00, urb);
                        if (ret)
                                return ret;
                        break;
                }               /* else fallthrough */
        case STATUS_STAGE:
                pid = !usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;
                ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
                                       STATUS_STAGE);
                if (ret)
                        return ret;
                break;
        }

        return 0;
}

/*
 * return 0 in case more bandwidth is available, else errorcode
 */
static int c67x00_add_int_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
        struct c67x00_urb_priv *urbp = urb->hcpriv;

        if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
                urbp->ep_data->next_frame =
                    frame_add(urbp->ep_data->next_frame, urb->interval);
                return c67x00_add_data_urb(c67x00, urb);
        }
        return 0;
}

static int c67x00_add_iso_urb(struct c67x00_hcd *c67x00, struct urb *urb)
{
        struct c67x00_urb_priv *urbp = urb->hcpriv;

        if (frame_after_eq(c67x00->current_frame, urbp->ep_data->next_frame)) {
                char *td_buf;
                int len, pid, ret;

                BUG_ON(urbp->cnt >= urb->number_of_packets);

                td_buf = urb->transfer_buffer +
                    urb->iso_frame_desc[urbp->cnt].offset;
                len = urb->iso_frame_desc[urbp->cnt].length;
                pid = usb_pipeout(urb->pipe) ? USB_PID_OUT : USB_PID_IN;

                ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
                                       urbp->cnt);
                if (ret) {
                        printk(KERN_DEBUG "create failed: %d\n", ret);
                        urb->iso_frame_desc[urbp->cnt].actual_length = 0;
                        urb->iso_frame_desc[urbp->cnt].status = ret;
                        if (urbp->cnt + 1 == urb->number_of_packets)
                                c67x00_giveback_urb(c67x00, urb, 0);
                }

                urbp->ep_data->next_frame =
                    frame_add(urbp->ep_data->next_frame, urb->interval);
                urbp->cnt++;
        }
        return 0;
}

/* -------------------------------------------------------------------------- */

static void c67x00_fill_from_list(struct c67x00_hcd *c67x00, int type,
                                  int (*add)(struct c67x00_hcd *, struct urb *))
{
        struct c67x00_ep_data *ep_data;
        struct urb *urb;

        /* traverse every endpoint on the list */
        list_for_each_entry(ep_data, &c67x00->list[type], node) {
                if (!list_empty(&ep_data->queue)) {
                        /* and add the first urb */
                        /* isochronous transfers rely on this */
                        urb = list_entry(ep_data->queue.next,
                                         struct c67x00_urb_priv,
                                         hep_node)->urb;
                        add(c67x00, urb);
                }
        }
}

static void c67x00_fill_frame(struct c67x00_hcd *c67x00)
{
        struct c67x00_td *td, *ttd;

        /* Check if we can proceed */
        if (!list_empty(&c67x00->td_list)) {
                dev_warn(c67x00_hcd_dev(c67x00),
                         "TD list not empty! This should not happen!\n");
                list_for_each_entry_safe(td, ttd, &c67x00->td_list, td_list) {
                        dbg_td(c67x00, td, "Unprocessed td");
                        c67x00_release_td(td);
                }
        }

        /* Reinitialize variables */
        c67x00->bandwidth_allocated = 0;
        c67x00->periodic_bw_allocated = 0;

        c67x00->next_td_addr = c67x00->td_base_addr;
        c67x00->next_buf_addr = c67x00->buf_base_addr;

        /* Fill the list */
        c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
        c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
        c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
        c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
}

/* -------------------------------------------------------------------------- */

/**
 * Get TD from C67X00
 */
static inline void
c67x00_parse_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
        c67x00_ll_read_mem_le16(c67x00->sie->dev,
                                td->td_addr, td, CY_TD_SIZE);

        if (usb_pipein(td->pipe) && td_actual_bytes(td))
                c67x00_ll_read_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
                                        td->data, td_actual_bytes(td));
}

static int c67x00_td_to_error(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
        if (td->status & TD_STATUSMASK_ERR) {
                dbg_td(c67x00, td, "ERROR_FLAG");
                return -EILSEQ;
        }
        if (td->status & TD_STATUSMASK_STALL) {
                /* dbg_td(c67x00, td, "STALL"); */
                return -EPIPE;
        }
        if (td->status & TD_STATUSMASK_TMOUT) {
                dbg_td(c67x00, td, "TIMEOUT");
                return -ETIMEDOUT;
        }

        return 0;
}

static inline int c67x00_end_of_data(struct c67x00_td *td)
{
        int maxps, need_empty, remaining;
        struct urb *urb = td->urb;
        int act_bytes;

        act_bytes = td_actual_bytes(td);

        if (unlikely(!act_bytes))
                return 1;       /* This was an empty packet */

        maxps = usb_maxpacket(td_udev(td), td->pipe, usb_pipeout(td->pipe));

        if (unlikely(act_bytes < maxps))
                return 1;       /* Smaller than a full packet */

        remaining = urb->transfer_buffer_length - urb->actual_length;
        need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
            usb_pipeout(urb->pipe) && !(remaining % maxps);

        if (unlikely(!remaining && !need_empty))
                return 1;

        return 0;
}

/* -------------------------------------------------------------------------- */
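
/*
 * Example of why the cleanup below is needed: a 512-byte bulk IN urb is
 * scheduled as 8 TDs of 64 bytes.  If the device answers the third TD
 * with a short packet, the transfer is complete and the remaining 5 TDs
 * queued for that pipe must be discarded before their (stale) results
 * are interpreted.
 */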

/* Remove all tds from the list which come
 * after last_td and are meant for the same pipe.
 * This is used when a short packet has occurred */
static inline void c67x00_clear_pipe(struct c67x00_hcd *c67x00,
                                     struct c67x00_td *last_td)
{
        struct c67x00_td *td, *tmp;
        td = last_td;
        tmp = last_td;
        while (td->td_list.next != &c67x00->td_list) {
                td = list_entry(td->td_list.next, struct c67x00_td, td_list);
                if (td->pipe == last_td->pipe) {
                        c67x00_release_td(td);
                        td = tmp;
                }
                tmp = td;
        }
}

/* -------------------------------------------------------------------------- */

static void c67x00_handle_successful_td(struct c67x00_hcd *c67x00,
                                        struct c67x00_td *td)
{
        struct urb *urb = td->urb;

        if (!urb)
                return;

        urb->actual_length += td_actual_bytes(td);

        switch (usb_pipetype(td->pipe)) {
                /* isochronous tds are handled separately */
        case PIPE_CONTROL:
                switch (td->privdata) {
                case SETUP_STAGE:
                        urb->interval =
                            urb->transfer_buffer_length ?
                            DATA_STAGE : STATUS_STAGE;
                        /* Don't count setup_packet with normal data: */
                        urb->actual_length = 0;
                        break;

                case DATA_STAGE:
                        if (c67x00_end_of_data(td)) {
                                urb->interval = STATUS_STAGE;
                                c67x00_clear_pipe(c67x00, td);
                        }
                        break;

                case STATUS_STAGE:
                        urb->interval = 0;
                        c67x00_giveback_urb(c67x00, urb, 0);
                        break;
                }
                break;

        case PIPE_INTERRUPT:
        case PIPE_BULK:
                if (unlikely(c67x00_end_of_data(td))) {
                        c67x00_clear_pipe(c67x00, td);
                        c67x00_giveback_urb(c67x00, urb, 0);
                }
                break;
        }
}

static void c67x00_handle_isoc(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
        struct urb *urb = td->urb;
        struct c67x00_urb_priv *urbp;
        int cnt;

        if (!urb)
                return;

        urbp = urb->hcpriv;
        cnt = td->privdata;

        if (td->status & TD_ERROR_MASK)
                urb->error_count++;

        urb->iso_frame_desc[cnt].actual_length = td_actual_bytes(td);
        urb->iso_frame_desc[cnt].status = c67x00_td_to_error(c67x00, td);
        if (cnt + 1 == urb->number_of_packets)  /* Last packet */
                c67x00_giveback_urb(c67x00, urb, 0);
}

/* -------------------------------------------------------------------------- */
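
/*
 * Note on toggle repair in c67x00_check_td_list() below: SEQ_SEL in
 * td->ctrl_reg holds the toggle value the TD was sent with, so after an
 * acked TD the next toggle must be its complement, hence
 * usb_settoggle(..., !(td->ctrl_reg & SEQ_SEL)).  This keeps the core's
 * toggle state correct even when the urb was dequeued mid-transfer and
 * td->urb has already been cleared.
 */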

/**
 * c67x00_check_td_list - handle tds which have been processed by the c67x00
 * pre: current_td == 0
 */
static inline void c67x00_check_td_list(struct c67x00_hcd *c67x00)
{
        struct c67x00_td *td, *tmp;
        struct urb *urb;
        int ack_ok;
        int clear_endpoint;

        list_for_each_entry_safe(td, tmp, &c67x00->td_list, td_list) {
                /* get the TD */
                c67x00_parse_td(c67x00, td);
                urb = td->urb;  /* urb can be NULL! */
                ack_ok = 0;
                clear_endpoint = 1;

                /* Handle isochronous transfers separately */
                if (usb_pipeisoc(td->pipe)) {
                        clear_endpoint = 0;
                        c67x00_handle_isoc(c67x00, td);
                        goto cont;
                }

                /* When an error occurs, all tds for that pipe go into an
                 * inactive state.  This state matches successful transfers,
                 * so we must make sure not to service them. */
                if (td->status & TD_ERROR_MASK) {
                        c67x00_giveback_urb(c67x00, urb,
                                            c67x00_td_to_error(c67x00, td));
                        goto cont;
                }

                if ((td->status & TD_STATUSMASK_NAK) || !td_sequence_ok(td) ||
                    !td_acked(td))
                        goto cont;

                /* Sequence ok and acked, don't need to fix toggle */
                ack_ok = 1;

                if (unlikely(td->status & TD_STATUSMASK_OVF)) {
                        if (td_residue(td) & TD_RESIDUE_OVERFLOW) {
                                /* Overflow */
                                c67x00_giveback_urb(c67x00, urb, -EOVERFLOW);
                                goto cont;
                        }
                }

                clear_endpoint = 0;
                c67x00_handle_successful_td(c67x00, td);

cont:
                if (clear_endpoint)
                        c67x00_clear_pipe(c67x00, td);
                if (ack_ok)
                        usb_settoggle(td_udev(td), usb_pipeendpoint(td->pipe),
                                      usb_pipeout(td->pipe),
                                      !(td->ctrl_reg & SEQ_SEL));
                /* next in list could have been removed, due to clear_pipe! */
                tmp = list_entry(td->td_list.next, typeof(*td), td_list);
                c67x00_release_td(td);
        }
}

/* -------------------------------------------------------------------------- */

static inline int c67x00_all_tds_processed(struct c67x00_hcd *c67x00)
{
        /* If all tds are processed, we can check the previous frame (if
         * there was any) and start our next frame.
         */
        return !c67x00_ll_husb_get_current_td(c67x00->sie);
}

/**
 * Send td to C67X00
 */
static void c67x00_send_td(struct c67x00_hcd *c67x00, struct c67x00_td *td)
{
        int len = td_length(td);

        if (len && ((td->pid_ep & TD_PIDEPMASK_PID) != TD_PID_IN))
                c67x00_ll_write_mem_le16(c67x00->sie->dev, td_ly_base_addr(td),
                                         td->data, len);

        c67x00_ll_write_mem_le16(c67x00->sie->dev,
                                 td->td_addr, td, CY_TD_SIZE);
}

static void c67x00_send_frame(struct c67x00_hcd *c67x00)
{
        struct c67x00_td *td;

        if (list_empty(&c67x00->td_list))
                dev_warn(c67x00_hcd_dev(c67x00),
                         "%s: td list should not be empty here!\n",
                         __func__);

        list_for_each_entry(td, &c67x00->td_list, td_list) {
                if (td->td_list.next == &c67x00->td_list)
                        td->next_td_addr = 0;   /* Last td in list */

                c67x00_send_td(c67x00, td);
        }

        c67x00_ll_husb_set_current_td(c67x00->sie, c67x00->td_base_addr);
}

/* -------------------------------------------------------------------------- */

/**
 * c67x00_do_work - Scheduler's state machine
 */
static void c67x00_do_work(struct c67x00_hcd *c67x00)
{
        spin_lock(&c67x00->lock);
        /* Make sure all tds are processed */
        if (!c67x00_all_tds_processed(c67x00))
                goto out;

        c67x00_check_td_list(c67x00);

        /* no tds are being processed (current == 0)
         * and all have been "checked" */
        complete(&c67x00->endpoint_disable);

        if (!list_empty(&c67x00->td_list))
                goto out;

        c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
        if (c67x00->current_frame == c67x00->last_frame)
                goto out;       /* Don't send tds in same frame */
        c67x00->last_frame = c67x00->current_frame;

        /* If no urbs are scheduled, our work is done */
        if (!c67x00->urb_count) {
                c67x00_ll_hpi_disable_sofeop(c67x00->sie);
                goto out;
        }

        c67x00_fill_frame(c67x00);
        if (!list_empty(&c67x00->td_list))
                /* TDs have been added to the frame */
                c67x00_send_frame(c67x00);

out:
        spin_unlock(&c67x00->lock);
}

/* -------------------------------------------------------------------------- */
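
/*
 * Scheduling runs from the hi-priority tasklet below.  c67x00_sched_kick()
 * is called on enqueue and on endpoint disable above, and (given the
 * SOF/EOP interrupts enabled in c67x00_urb_enqueue()) presumably from the
 * SIE interrupt handler as well; every kick reaps the previous frame's
 * TDs and, once they are all processed, builds and sends the next frame.
 */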

static void c67x00_sched_tasklet(unsigned long __c67x00)
{
        struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00;
        c67x00_do_work(c67x00);
}

void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
        tasklet_hi_schedule(&c67x00->tasklet);
}

int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
        tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet,
                     (unsigned long)c67x00);
        return 0;
}

void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
        tasklet_kill(&c67x00->tasklet);
}