1 /* 2 * MAX3421 Host Controller driver for USB. 3 * 4 * Author: David Mosberger-Tang <davidm@egauge.net> 5 * 6 * (C) Copyright 2014 David Mosberger-Tang <davidm@egauge.net> 7 * 8 * MAX3421 is a chip implementing a USB 2.0 Full-/Low-Speed host 9 * controller on a SPI bus. 10 * 11 * Based on: 12 * o MAX3421E datasheet 13 * http://datasheets.maximintegrated.com/en/ds/MAX3421E.pdf 14 * o MAX3421E Programming Guide 15 * http://www.hdl.co.jp/ftpdata/utl-001/AN3785.pdf 16 * o gadget/dummy_hcd.c 17 * For USB HCD implementation. 18 * o Arduino MAX3421 driver 19 * https://github.com/felis/USB_Host_Shield_2.0/blob/master/Usb.cpp 20 * 21 * This file is licenced under the GPL v2. 22 * 23 * Important note on worst-case (full-speed) packet size constraints 24 * (See USB 2.0 Section 5.6.3 and following): 25 * 26 * - control: 64 bytes 27 * - isochronous: 1023 bytes 28 * - interrupt: 64 bytes 29 * - bulk: 64 bytes 30 * 31 * Since the MAX3421 FIFO size is 64 bytes, we do not have to work about 32 * multi-FIFO writes/reads for a single USB packet *except* for isochronous 33 * transfers. We don't support isochronous transfers at this time, so we 34 * just assume that a USB packet always fits into a single FIFO buffer. 35 * 36 * NOTE: The June 2006 version of "MAX3421E Programming Guide" 37 * (AN3785) has conflicting info for the RCVDAVIRQ bit: 38 * 39 * The description of RCVDAVIRQ says "The CPU *must* clear 40 * this IRQ bit (by writing a 1 to it) before reading the 41 * RCVFIFO data. 42 * 43 * However, the earlier section on "Programming BULK-IN 44 * Transfers" says * that: 45 * 46 * After the CPU retrieves the data, it clears the 47 * RCVDAVIRQ bit. 48 * 49 * The December 2006 version has been corrected and it consistently 50 * states the second behavior is the correct one. 51 * 52 * Synchronous SPI transactions sleep so we can't perform any such 53 * transactions while holding a spin-lock (and/or while interrupts are 54 * masked). To achieve this, all SPI transactions are issued from a 55 * single thread (max3421_spi_thread). 56 */ 57 58 #include <linux/jiffies.h> 59 #include <linux/module.h> 60 #include <linux/spi/spi.h> 61 #include <linux/usb.h> 62 #include <linux/usb/hcd.h> 63 64 #include <linux/platform_data/max3421-hcd.h> 65 66 #define DRIVER_DESC "MAX3421 USB Host-Controller Driver" 67 #define DRIVER_VERSION "1.0" 68 69 /* 11-bit counter that wraps around (USB 2.0 Section 8.3.3): */ 70 #define USB_MAX_FRAME_NUMBER 0x7ff 71 #define USB_MAX_RETRIES 3 /* # of retries before error is reported */ 72 73 /* 74 * Max. # of times we're willing to retransmit a request immediately in 75 * resposne to a NAK. Afterwards, we fall back on trying once a frame. 76 */ 77 #define NAK_MAX_FAST_RETRANSMITS 2 78 79 #define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */ 80 81 /* Port-change mask: */ 82 #define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \ 83 USB_PORT_STAT_C_ENABLE | \ 84 USB_PORT_STAT_C_SUSPEND | \ 85 USB_PORT_STAT_C_OVERCURRENT | \ 86 USB_PORT_STAT_C_RESET) << 16) 87 88 enum max3421_rh_state { 89 MAX3421_RH_RESET, 90 MAX3421_RH_SUSPENDED, 91 MAX3421_RH_RUNNING 92 }; 93 94 enum pkt_state { 95 PKT_STATE_SETUP, /* waiting to send setup packet to ctrl pipe */ 96 PKT_STATE_TRANSFER, /* waiting to xfer transfer_buffer */ 97 PKT_STATE_TERMINATE /* waiting to terminate control transfer */ 98 }; 99 100 enum scheduling_pass { 101 SCHED_PASS_PERIODIC, 102 SCHED_PASS_NON_PERIODIC, 103 SCHED_PASS_DONE 104 }; 105 106 /* Bit numbers for max3421_hcd->todo: */ 107 enum { 108 ENABLE_IRQ = 0, 109 RESET_HCD, 110 RESET_PORT, 111 CHECK_UNLINK, 112 IOPIN_UPDATE 113 }; 114 115 struct max3421_dma_buf { 116 u8 data[2]; 117 }; 118 119 struct max3421_hcd { 120 spinlock_t lock; 121 122 struct task_struct *spi_thread; 123 124 struct max3421_hcd *next; 125 126 enum max3421_rh_state rh_state; 127 /* lower 16 bits contain port status, upper 16 bits the change mask: */ 128 u32 port_status; 129 130 unsigned active:1; 131 132 struct list_head ep_list; /* list of EP's with work */ 133 134 /* 135 * The following are owned by spi_thread (may be accessed by 136 * SPI-thread without acquiring the HCD lock: 137 */ 138 u8 rev; /* chip revision */ 139 u16 frame_number; 140 /* 141 * kmalloc'd buffers guaranteed to be in separate (DMA) 142 * cache-lines: 143 */ 144 struct max3421_dma_buf *tx; 145 struct max3421_dma_buf *rx; 146 /* 147 * URB we're currently processing. Must not be reset to NULL 148 * unless MAX3421E chip is idle: 149 */ 150 struct urb *curr_urb; 151 enum scheduling_pass sched_pass; 152 struct usb_device *loaded_dev; /* dev that's loaded into the chip */ 153 int loaded_epnum; /* epnum whose toggles are loaded */ 154 int urb_done; /* > 0 -> no errors, < 0: errno */ 155 size_t curr_len; 156 u8 hien; 157 u8 mode; 158 u8 iopins[2]; 159 unsigned long todo; 160 #ifdef DEBUG 161 unsigned long err_stat[16]; 162 #endif 163 }; 164 165 struct max3421_ep { 166 struct usb_host_endpoint *ep; 167 struct list_head ep_list; 168 u32 naks; 169 u16 last_active; /* frame # this ep was last active */ 170 enum pkt_state pkt_state; 171 u8 retries; 172 u8 retransmit; /* packet needs retransmission */ 173 }; 174 175 static struct max3421_hcd *max3421_hcd_list; 176 177 #define MAX3421_FIFO_SIZE 64 178 179 #define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */ 180 #define MAX3421_SPI_DIR_WR 1 /* write register to MAX3421 */ 181 182 /* SPI commands: */ 183 #define MAX3421_SPI_DIR_SHIFT 1 184 #define MAX3421_SPI_REG_SHIFT 3 185 186 #define MAX3421_REG_RCVFIFO 1 187 #define MAX3421_REG_SNDFIFO 2 188 #define MAX3421_REG_SUDFIFO 4 189 #define MAX3421_REG_RCVBC 6 190 #define MAX3421_REG_SNDBC 7 191 #define MAX3421_REG_USBIRQ 13 192 #define MAX3421_REG_USBIEN 14 193 #define MAX3421_REG_USBCTL 15 194 #define MAX3421_REG_CPUCTL 16 195 #define MAX3421_REG_PINCTL 17 196 #define MAX3421_REG_REVISION 18 197 #define MAX3421_REG_IOPINS1 20 198 #define MAX3421_REG_IOPINS2 21 199 #define MAX3421_REG_GPINIRQ 22 200 #define MAX3421_REG_GPINIEN 23 201 #define MAX3421_REG_GPINPOL 24 202 #define MAX3421_REG_HIRQ 25 203 #define MAX3421_REG_HIEN 26 204 #define MAX3421_REG_MODE 27 205 #define MAX3421_REG_PERADDR 28 206 #define MAX3421_REG_HCTL 29 207 #define MAX3421_REG_HXFR 30 208 #define MAX3421_REG_HRSL 31 209 210 enum { 211 MAX3421_USBIRQ_OSCOKIRQ_BIT = 0, 212 MAX3421_USBIRQ_NOVBUSIRQ_BIT = 5, 213 MAX3421_USBIRQ_VBUSIRQ_BIT 214 }; 215 216 enum { 217 MAX3421_CPUCTL_IE_BIT = 0, 218 MAX3421_CPUCTL_PULSEWID0_BIT = 6, 219 MAX3421_CPUCTL_PULSEWID1_BIT 220 }; 221 222 enum { 223 MAX3421_USBCTL_PWRDOWN_BIT = 4, 224 MAX3421_USBCTL_CHIPRES_BIT 225 }; 226 227 enum { 228 MAX3421_PINCTL_GPXA_BIT = 0, 229 MAX3421_PINCTL_GPXB_BIT, 230 MAX3421_PINCTL_POSINT_BIT, 231 MAX3421_PINCTL_INTLEVEL_BIT, 232 MAX3421_PINCTL_FDUPSPI_BIT, 233 MAX3421_PINCTL_EP0INAK_BIT, 234 MAX3421_PINCTL_EP2INAK_BIT, 235 MAX3421_PINCTL_EP3INAK_BIT, 236 }; 237 238 enum { 239 MAX3421_HI_BUSEVENT_BIT = 0, /* bus-reset/-resume */ 240 MAX3421_HI_RWU_BIT, /* remote wakeup */ 241 MAX3421_HI_RCVDAV_BIT, /* receive FIFO data available */ 242 MAX3421_HI_SNDBAV_BIT, /* send buffer available */ 243 MAX3421_HI_SUSDN_BIT, /* suspend operation done */ 244 MAX3421_HI_CONDET_BIT, /* peripheral connect/disconnect */ 245 MAX3421_HI_FRAME_BIT, /* frame generator */ 246 MAX3421_HI_HXFRDN_BIT, /* host transfer done */ 247 }; 248 249 enum { 250 MAX3421_HCTL_BUSRST_BIT = 0, 251 MAX3421_HCTL_FRMRST_BIT, 252 MAX3421_HCTL_SAMPLEBUS_BIT, 253 MAX3421_HCTL_SIGRSM_BIT, 254 MAX3421_HCTL_RCVTOG0_BIT, 255 MAX3421_HCTL_RCVTOG1_BIT, 256 MAX3421_HCTL_SNDTOG0_BIT, 257 MAX3421_HCTL_SNDTOG1_BIT 258 }; 259 260 enum { 261 MAX3421_MODE_HOST_BIT = 0, 262 MAX3421_MODE_LOWSPEED_BIT, 263 MAX3421_MODE_HUBPRE_BIT, 264 MAX3421_MODE_SOFKAENAB_BIT, 265 MAX3421_MODE_SEPIRQ_BIT, 266 MAX3421_MODE_DELAYISO_BIT, 267 MAX3421_MODE_DMPULLDN_BIT, 268 MAX3421_MODE_DPPULLDN_BIT 269 }; 270 271 enum { 272 MAX3421_HRSL_OK = 0, 273 MAX3421_HRSL_BUSY, 274 MAX3421_HRSL_BADREQ, 275 MAX3421_HRSL_UNDEF, 276 MAX3421_HRSL_NAK, 277 MAX3421_HRSL_STALL, 278 MAX3421_HRSL_TOGERR, 279 MAX3421_HRSL_WRONGPID, 280 MAX3421_HRSL_BADBC, 281 MAX3421_HRSL_PIDERR, 282 MAX3421_HRSL_PKTERR, 283 MAX3421_HRSL_CRCERR, 284 MAX3421_HRSL_KERR, 285 MAX3421_HRSL_JERR, 286 MAX3421_HRSL_TIMEOUT, 287 MAX3421_HRSL_BABBLE, 288 MAX3421_HRSL_RESULT_MASK = 0xf, 289 MAX3421_HRSL_RCVTOGRD_BIT = 4, 290 MAX3421_HRSL_SNDTOGRD_BIT, 291 MAX3421_HRSL_KSTATUS_BIT, 292 MAX3421_HRSL_JSTATUS_BIT 293 }; 294 295 /* Return same error-codes as ohci.h:cc_to_error: */ 296 static const int hrsl_to_error[] = { 297 [MAX3421_HRSL_OK] = 0, 298 [MAX3421_HRSL_BUSY] = -EINVAL, 299 [MAX3421_HRSL_BADREQ] = -EINVAL, 300 [MAX3421_HRSL_UNDEF] = -EINVAL, 301 [MAX3421_HRSL_NAK] = -EAGAIN, 302 [MAX3421_HRSL_STALL] = -EPIPE, 303 [MAX3421_HRSL_TOGERR] = -EILSEQ, 304 [MAX3421_HRSL_WRONGPID] = -EPROTO, 305 [MAX3421_HRSL_BADBC] = -EREMOTEIO, 306 [MAX3421_HRSL_PIDERR] = -EPROTO, 307 [MAX3421_HRSL_PKTERR] = -EPROTO, 308 [MAX3421_HRSL_CRCERR] = -EILSEQ, 309 [MAX3421_HRSL_KERR] = -EIO, 310 [MAX3421_HRSL_JERR] = -EIO, 311 [MAX3421_HRSL_TIMEOUT] = -ETIME, 312 [MAX3421_HRSL_BABBLE] = -EOVERFLOW 313 }; 314 315 /* 316 * See http://www.beyondlogic.org/usbnutshell/usb4.shtml#Control for a 317 * reasonable overview of how control transfers use the the IN/OUT 318 * tokens. 319 */ 320 #define MAX3421_HXFR_BULK_IN(ep) (0x00 | (ep)) /* bulk or interrupt */ 321 #define MAX3421_HXFR_SETUP 0x10 322 #define MAX3421_HXFR_BULK_OUT(ep) (0x20 | (ep)) /* bulk or interrupt */ 323 #define MAX3421_HXFR_ISO_IN(ep) (0x40 | (ep)) 324 #define MAX3421_HXFR_ISO_OUT(ep) (0x60 | (ep)) 325 #define MAX3421_HXFR_HS_IN 0x80 /* handshake in */ 326 #define MAX3421_HXFR_HS_OUT 0xa0 /* handshake out */ 327 328 #define field(val, bit) ((val) << (bit)) 329 330 static inline s16 331 frame_diff(u16 left, u16 right) 332 { 333 return ((unsigned) (left - right)) % (USB_MAX_FRAME_NUMBER + 1); 334 } 335 336 static inline struct max3421_hcd * 337 hcd_to_max3421(struct usb_hcd *hcd) 338 { 339 return (struct max3421_hcd *) hcd->hcd_priv; 340 } 341 342 static inline struct usb_hcd * 343 max3421_to_hcd(struct max3421_hcd *max3421_hcd) 344 { 345 return container_of((void *) max3421_hcd, struct usb_hcd, hcd_priv); 346 } 347 348 static u8 349 spi_rd8(struct usb_hcd *hcd, unsigned int reg) 350 { 351 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 352 struct spi_device *spi = to_spi_device(hcd->self.controller); 353 struct spi_transfer transfer; 354 struct spi_message msg; 355 356 memset(&transfer, 0, sizeof(transfer)); 357 358 spi_message_init(&msg); 359 360 max3421_hcd->tx->data[0] = 361 (field(reg, MAX3421_SPI_REG_SHIFT) | 362 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT)); 363 364 transfer.tx_buf = max3421_hcd->tx->data; 365 transfer.rx_buf = max3421_hcd->rx->data; 366 transfer.len = 2; 367 368 spi_message_add_tail(&transfer, &msg); 369 spi_sync(spi, &msg); 370 371 return max3421_hcd->rx->data[1]; 372 } 373 374 static void 375 spi_wr8(struct usb_hcd *hcd, unsigned int reg, u8 val) 376 { 377 struct spi_device *spi = to_spi_device(hcd->self.controller); 378 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 379 struct spi_transfer transfer; 380 struct spi_message msg; 381 382 memset(&transfer, 0, sizeof(transfer)); 383 384 spi_message_init(&msg); 385 386 max3421_hcd->tx->data[0] = 387 (field(reg, MAX3421_SPI_REG_SHIFT) | 388 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT)); 389 max3421_hcd->tx->data[1] = val; 390 391 transfer.tx_buf = max3421_hcd->tx->data; 392 transfer.len = 2; 393 394 spi_message_add_tail(&transfer, &msg); 395 spi_sync(spi, &msg); 396 } 397 398 static void 399 spi_rd_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len) 400 { 401 struct spi_device *spi = to_spi_device(hcd->self.controller); 402 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 403 struct spi_transfer transfer[2]; 404 struct spi_message msg; 405 406 memset(transfer, 0, sizeof(transfer)); 407 408 spi_message_init(&msg); 409 410 max3421_hcd->tx->data[0] = 411 (field(reg, MAX3421_SPI_REG_SHIFT) | 412 field(MAX3421_SPI_DIR_RD, MAX3421_SPI_DIR_SHIFT)); 413 transfer[0].tx_buf = max3421_hcd->tx->data; 414 transfer[0].len = 1; 415 416 transfer[1].rx_buf = buf; 417 transfer[1].len = len; 418 419 spi_message_add_tail(&transfer[0], &msg); 420 spi_message_add_tail(&transfer[1], &msg); 421 spi_sync(spi, &msg); 422 } 423 424 static void 425 spi_wr_buf(struct usb_hcd *hcd, unsigned int reg, void *buf, size_t len) 426 { 427 struct spi_device *spi = to_spi_device(hcd->self.controller); 428 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 429 struct spi_transfer transfer[2]; 430 struct spi_message msg; 431 432 memset(transfer, 0, sizeof(transfer)); 433 434 spi_message_init(&msg); 435 436 max3421_hcd->tx->data[0] = 437 (field(reg, MAX3421_SPI_REG_SHIFT) | 438 field(MAX3421_SPI_DIR_WR, MAX3421_SPI_DIR_SHIFT)); 439 440 transfer[0].tx_buf = max3421_hcd->tx->data; 441 transfer[0].len = 1; 442 443 transfer[1].tx_buf = buf; 444 transfer[1].len = len; 445 446 spi_message_add_tail(&transfer[0], &msg); 447 spi_message_add_tail(&transfer[1], &msg); 448 spi_sync(spi, &msg); 449 } 450 451 /* 452 * Figure out the correct setting for the LOWSPEED and HUBPRE mode 453 * bits. The HUBPRE bit needs to be set when MAX3421E operates at 454 * full speed, but it's talking to a low-speed device (i.e., through a 455 * hub). Setting that bit ensures that every low-speed packet is 456 * preceded by a full-speed PRE PID. Possible configurations: 457 * 458 * Hub speed: Device speed: => LOWSPEED bit: HUBPRE bit: 459 * FULL FULL => 0 0 460 * FULL LOW => 1 1 461 * LOW LOW => 1 0 462 * LOW FULL => 1 0 463 */ 464 static void 465 max3421_set_speed(struct usb_hcd *hcd, struct usb_device *dev) 466 { 467 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 468 u8 mode_lowspeed, mode_hubpre, mode = max3421_hcd->mode; 469 470 mode_lowspeed = BIT(MAX3421_MODE_LOWSPEED_BIT); 471 mode_hubpre = BIT(MAX3421_MODE_HUBPRE_BIT); 472 if (max3421_hcd->port_status & USB_PORT_STAT_LOW_SPEED) { 473 mode |= mode_lowspeed; 474 mode &= ~mode_hubpre; 475 } else if (dev->speed == USB_SPEED_LOW) { 476 mode |= mode_lowspeed | mode_hubpre; 477 } else { 478 mode &= ~(mode_lowspeed | mode_hubpre); 479 } 480 if (mode != max3421_hcd->mode) { 481 max3421_hcd->mode = mode; 482 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode); 483 } 484 485 } 486 487 /* 488 * Caller must NOT hold HCD spinlock. 489 */ 490 static void 491 max3421_set_address(struct usb_hcd *hcd, struct usb_device *dev, int epnum, 492 int force_toggles) 493 { 494 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 495 int old_epnum, same_ep, rcvtog, sndtog; 496 struct usb_device *old_dev; 497 u8 hctl; 498 499 old_dev = max3421_hcd->loaded_dev; 500 old_epnum = max3421_hcd->loaded_epnum; 501 502 same_ep = (dev == old_dev && epnum == old_epnum); 503 if (same_ep && !force_toggles) 504 return; 505 506 if (old_dev && !same_ep) { 507 /* save the old end-points toggles: */ 508 u8 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); 509 510 rcvtog = (hrsl >> MAX3421_HRSL_RCVTOGRD_BIT) & 1; 511 sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; 512 513 /* no locking: HCD (i.e., we) own toggles, don't we? */ 514 usb_settoggle(old_dev, old_epnum, 0, rcvtog); 515 usb_settoggle(old_dev, old_epnum, 1, sndtog); 516 } 517 /* setup new endpoint's toggle bits: */ 518 rcvtog = usb_gettoggle(dev, epnum, 0); 519 sndtog = usb_gettoggle(dev, epnum, 1); 520 hctl = (BIT(rcvtog + MAX3421_HCTL_RCVTOG0_BIT) | 521 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); 522 523 max3421_hcd->loaded_epnum = epnum; 524 spi_wr8(hcd, MAX3421_REG_HCTL, hctl); 525 526 /* 527 * Note: devnum for one and the same device can change during 528 * address-assignment so it's best to just always load the 529 * address whenever the end-point changed/was forced. 530 */ 531 max3421_hcd->loaded_dev = dev; 532 spi_wr8(hcd, MAX3421_REG_PERADDR, dev->devnum); 533 } 534 535 static int 536 max3421_ctrl_setup(struct usb_hcd *hcd, struct urb *urb) 537 { 538 spi_wr_buf(hcd, MAX3421_REG_SUDFIFO, urb->setup_packet, 8); 539 return MAX3421_HXFR_SETUP; 540 } 541 542 static int 543 max3421_transfer_in(struct usb_hcd *hcd, struct urb *urb) 544 { 545 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 546 int epnum = usb_pipeendpoint(urb->pipe); 547 548 max3421_hcd->curr_len = 0; 549 max3421_hcd->hien |= BIT(MAX3421_HI_RCVDAV_BIT); 550 return MAX3421_HXFR_BULK_IN(epnum); 551 } 552 553 static int 554 max3421_transfer_out(struct usb_hcd *hcd, struct urb *urb, int fast_retransmit) 555 { 556 struct spi_device *spi = to_spi_device(hcd->self.controller); 557 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 558 int epnum = usb_pipeendpoint(urb->pipe); 559 u32 max_packet; 560 void *src; 561 562 src = urb->transfer_buffer + urb->actual_length; 563 564 if (fast_retransmit) { 565 if (max3421_hcd->rev == 0x12) { 566 /* work around rev 0x12 bug: */ 567 spi_wr8(hcd, MAX3421_REG_SNDBC, 0); 568 spi_wr8(hcd, MAX3421_REG_SNDFIFO, ((u8 *) src)[0]); 569 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len); 570 } 571 return MAX3421_HXFR_BULK_OUT(epnum); 572 } 573 574 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1); 575 576 if (max_packet > MAX3421_FIFO_SIZE) { 577 /* 578 * We do not support isochronous transfers at this 579 * time. 580 */ 581 dev_err(&spi->dev, 582 "%s: packet-size of %u too big (limit is %u bytes)", 583 __func__, max_packet, MAX3421_FIFO_SIZE); 584 max3421_hcd->urb_done = -EMSGSIZE; 585 return -EMSGSIZE; 586 } 587 max3421_hcd->curr_len = min((urb->transfer_buffer_length - 588 urb->actual_length), max_packet); 589 590 spi_wr_buf(hcd, MAX3421_REG_SNDFIFO, src, max3421_hcd->curr_len); 591 spi_wr8(hcd, MAX3421_REG_SNDBC, max3421_hcd->curr_len); 592 return MAX3421_HXFR_BULK_OUT(epnum); 593 } 594 595 /* 596 * Issue the next host-transfer command. 597 * Caller must NOT hold HCD spinlock. 598 */ 599 static void 600 max3421_next_transfer(struct usb_hcd *hcd, int fast_retransmit) 601 { 602 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 603 struct urb *urb = max3421_hcd->curr_urb; 604 struct max3421_ep *max3421_ep; 605 int cmd = -EINVAL; 606 607 if (!urb) 608 return; /* nothing to do */ 609 610 max3421_ep = urb->ep->hcpriv; 611 612 switch (max3421_ep->pkt_state) { 613 case PKT_STATE_SETUP: 614 cmd = max3421_ctrl_setup(hcd, urb); 615 break; 616 617 case PKT_STATE_TRANSFER: 618 if (usb_urb_dir_in(urb)) 619 cmd = max3421_transfer_in(hcd, urb); 620 else 621 cmd = max3421_transfer_out(hcd, urb, fast_retransmit); 622 break; 623 624 case PKT_STATE_TERMINATE: 625 /* 626 * IN transfers are terminated with HS_OUT token, 627 * OUT transfers with HS_IN: 628 */ 629 if (usb_urb_dir_in(urb)) 630 cmd = MAX3421_HXFR_HS_OUT; 631 else 632 cmd = MAX3421_HXFR_HS_IN; 633 break; 634 } 635 636 if (cmd < 0) 637 return; 638 639 /* issue the command and wait for host-xfer-done interrupt: */ 640 641 spi_wr8(hcd, MAX3421_REG_HXFR, cmd); 642 max3421_hcd->hien |= BIT(MAX3421_HI_HXFRDN_BIT); 643 } 644 645 /* 646 * Find the next URB to process and start its execution. 647 * 648 * At this time, we do not anticipate ever connecting a USB hub to the 649 * MAX3421 chip, so at most USB device can be connected and we can use 650 * a simplistic scheduler: at the start of a frame, schedule all 651 * periodic transfers. Once that is done, use the remainder of the 652 * frame to process non-periodic (bulk & control) transfers. 653 * 654 * Preconditions: 655 * o Caller must NOT hold HCD spinlock. 656 * o max3421_hcd->curr_urb MUST BE NULL. 657 * o MAX3421E chip must be idle. 658 */ 659 static int 660 max3421_select_and_start_urb(struct usb_hcd *hcd) 661 { 662 struct spi_device *spi = to_spi_device(hcd->self.controller); 663 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 664 struct urb *urb, *curr_urb = NULL; 665 struct max3421_ep *max3421_ep; 666 int epnum, force_toggles = 0; 667 struct usb_host_endpoint *ep; 668 struct list_head *pos; 669 unsigned long flags; 670 671 spin_lock_irqsave(&max3421_hcd->lock, flags); 672 673 for (; 674 max3421_hcd->sched_pass < SCHED_PASS_DONE; 675 ++max3421_hcd->sched_pass) 676 list_for_each(pos, &max3421_hcd->ep_list) { 677 urb = NULL; 678 max3421_ep = container_of(pos, struct max3421_ep, 679 ep_list); 680 ep = max3421_ep->ep; 681 682 switch (usb_endpoint_type(&ep->desc)) { 683 case USB_ENDPOINT_XFER_ISOC: 684 case USB_ENDPOINT_XFER_INT: 685 if (max3421_hcd->sched_pass != 686 SCHED_PASS_PERIODIC) 687 continue; 688 break; 689 690 case USB_ENDPOINT_XFER_CONTROL: 691 case USB_ENDPOINT_XFER_BULK: 692 if (max3421_hcd->sched_pass != 693 SCHED_PASS_NON_PERIODIC) 694 continue; 695 break; 696 } 697 698 if (list_empty(&ep->urb_list)) 699 continue; /* nothing to do */ 700 urb = list_first_entry(&ep->urb_list, struct urb, 701 urb_list); 702 if (urb->unlinked) { 703 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d", 704 __func__, urb, urb->unlinked); 705 max3421_hcd->curr_urb = urb; 706 max3421_hcd->urb_done = 1; 707 spin_unlock_irqrestore(&max3421_hcd->lock, 708 flags); 709 return 1; 710 } 711 712 switch (usb_endpoint_type(&ep->desc)) { 713 case USB_ENDPOINT_XFER_CONTROL: 714 /* 715 * Allow one control transaction per 716 * frame per endpoint: 717 */ 718 if (frame_diff(max3421_ep->last_active, 719 max3421_hcd->frame_number) == 0) 720 continue; 721 break; 722 723 case USB_ENDPOINT_XFER_BULK: 724 if (max3421_ep->retransmit 725 && (frame_diff(max3421_ep->last_active, 726 max3421_hcd->frame_number) 727 == 0)) 728 /* 729 * We already tried this EP 730 * during this frame and got a 731 * NAK or error; wait for next frame 732 */ 733 continue; 734 break; 735 736 case USB_ENDPOINT_XFER_ISOC: 737 case USB_ENDPOINT_XFER_INT: 738 if (frame_diff(max3421_hcd->frame_number, 739 max3421_ep->last_active) 740 < urb->interval) 741 /* 742 * We already processed this 743 * end-point in the current 744 * frame 745 */ 746 continue; 747 break; 748 } 749 750 /* move current ep to tail: */ 751 list_move_tail(pos, &max3421_hcd->ep_list); 752 curr_urb = urb; 753 goto done; 754 } 755 done: 756 if (!curr_urb) { 757 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 758 return 0; 759 } 760 761 urb = max3421_hcd->curr_urb = curr_urb; 762 epnum = usb_endpoint_num(&urb->ep->desc); 763 if (max3421_ep->retransmit) 764 /* restart (part of) a USB transaction: */ 765 max3421_ep->retransmit = 0; 766 else { 767 /* start USB transaction: */ 768 if (usb_endpoint_xfer_control(&ep->desc)) { 769 /* 770 * See USB 2.0 spec section 8.6.1 771 * Initialization via SETUP Token: 772 */ 773 usb_settoggle(urb->dev, epnum, 0, 1); 774 usb_settoggle(urb->dev, epnum, 1, 1); 775 max3421_ep->pkt_state = PKT_STATE_SETUP; 776 force_toggles = 1; 777 } else 778 max3421_ep->pkt_state = PKT_STATE_TRANSFER; 779 } 780 781 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 782 783 max3421_ep->last_active = max3421_hcd->frame_number; 784 max3421_set_address(hcd, urb->dev, epnum, force_toggles); 785 max3421_set_speed(hcd, urb->dev); 786 max3421_next_transfer(hcd, 0); 787 return 1; 788 } 789 790 /* 791 * Check all endpoints for URBs that got unlinked. 792 * 793 * Caller must NOT hold HCD spinlock. 794 */ 795 static int 796 max3421_check_unlink(struct usb_hcd *hcd) 797 { 798 struct spi_device *spi = to_spi_device(hcd->self.controller); 799 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 800 struct list_head *pos, *upos, *next_upos; 801 struct max3421_ep *max3421_ep; 802 struct usb_host_endpoint *ep; 803 struct urb *urb; 804 unsigned long flags; 805 int retval = 0; 806 807 spin_lock_irqsave(&max3421_hcd->lock, flags); 808 list_for_each(pos, &max3421_hcd->ep_list) { 809 max3421_ep = container_of(pos, struct max3421_ep, ep_list); 810 ep = max3421_ep->ep; 811 list_for_each_safe(upos, next_upos, &ep->urb_list) { 812 urb = container_of(upos, struct urb, urb_list); 813 if (urb->unlinked) { 814 retval = 1; 815 dev_dbg(&spi->dev, "%s: URB %p unlinked=%d", 816 __func__, urb, urb->unlinked); 817 usb_hcd_unlink_urb_from_ep(hcd, urb); 818 spin_unlock_irqrestore(&max3421_hcd->lock, 819 flags); 820 usb_hcd_giveback_urb(hcd, urb, 0); 821 spin_lock_irqsave(&max3421_hcd->lock, flags); 822 } 823 } 824 } 825 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 826 return retval; 827 } 828 829 /* 830 * Caller must NOT hold HCD spinlock. 831 */ 832 static void 833 max3421_slow_retransmit(struct usb_hcd *hcd) 834 { 835 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 836 struct urb *urb = max3421_hcd->curr_urb; 837 struct max3421_ep *max3421_ep; 838 839 max3421_ep = urb->ep->hcpriv; 840 max3421_ep->retransmit = 1; 841 max3421_hcd->curr_urb = NULL; 842 } 843 844 /* 845 * Caller must NOT hold HCD spinlock. 846 */ 847 static void 848 max3421_recv_data_available(struct usb_hcd *hcd) 849 { 850 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 851 struct urb *urb = max3421_hcd->curr_urb; 852 size_t remaining, transfer_size; 853 u8 rcvbc; 854 855 rcvbc = spi_rd8(hcd, MAX3421_REG_RCVBC); 856 857 if (rcvbc > MAX3421_FIFO_SIZE) 858 rcvbc = MAX3421_FIFO_SIZE; 859 if (urb->actual_length >= urb->transfer_buffer_length) 860 remaining = 0; 861 else 862 remaining = urb->transfer_buffer_length - urb->actual_length; 863 transfer_size = rcvbc; 864 if (transfer_size > remaining) 865 transfer_size = remaining; 866 if (transfer_size > 0) { 867 void *dst = urb->transfer_buffer + urb->actual_length; 868 869 spi_rd_buf(hcd, MAX3421_REG_RCVFIFO, dst, transfer_size); 870 urb->actual_length += transfer_size; 871 max3421_hcd->curr_len = transfer_size; 872 } 873 874 /* ack the RCVDAV irq now that the FIFO has been read: */ 875 spi_wr8(hcd, MAX3421_REG_HIRQ, BIT(MAX3421_HI_RCVDAV_BIT)); 876 } 877 878 static void 879 max3421_handle_error(struct usb_hcd *hcd, u8 hrsl) 880 { 881 struct spi_device *spi = to_spi_device(hcd->self.controller); 882 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 883 u8 result_code = hrsl & MAX3421_HRSL_RESULT_MASK; 884 struct urb *urb = max3421_hcd->curr_urb; 885 struct max3421_ep *max3421_ep = urb->ep->hcpriv; 886 int switch_sndfifo; 887 888 /* 889 * If an OUT command results in any response other than OK 890 * (i.e., error or NAK), we have to perform a dummy-write to 891 * SNDBC so the FIFO gets switched back to us. Otherwise, we 892 * get out of sync with the SNDFIFO double buffer. 893 */ 894 switch_sndfifo = (max3421_ep->pkt_state == PKT_STATE_TRANSFER && 895 usb_urb_dir_out(urb)); 896 897 switch (result_code) { 898 case MAX3421_HRSL_OK: 899 return; /* this shouldn't happen */ 900 901 case MAX3421_HRSL_WRONGPID: /* received wrong PID */ 902 case MAX3421_HRSL_BUSY: /* SIE busy */ 903 case MAX3421_HRSL_BADREQ: /* bad val in HXFR */ 904 case MAX3421_HRSL_UNDEF: /* reserved */ 905 case MAX3421_HRSL_KERR: /* K-state instead of response */ 906 case MAX3421_HRSL_JERR: /* J-state instead of response */ 907 /* 908 * packet experienced an error that we cannot recover 909 * from; report error 910 */ 911 max3421_hcd->urb_done = hrsl_to_error[result_code]; 912 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x", 913 __func__, hrsl); 914 break; 915 916 case MAX3421_HRSL_TOGERR: 917 if (usb_urb_dir_in(urb)) 918 ; /* don't do anything (device will switch toggle) */ 919 else { 920 /* flip the send toggle bit: */ 921 int sndtog = (hrsl >> MAX3421_HRSL_SNDTOGRD_BIT) & 1; 922 923 sndtog ^= 1; 924 spi_wr8(hcd, MAX3421_REG_HCTL, 925 BIT(sndtog + MAX3421_HCTL_SNDTOG0_BIT)); 926 } 927 /* FALL THROUGH */ 928 case MAX3421_HRSL_BADBC: /* bad byte count */ 929 case MAX3421_HRSL_PIDERR: /* received PID is corrupted */ 930 case MAX3421_HRSL_PKTERR: /* packet error (stuff, EOP) */ 931 case MAX3421_HRSL_CRCERR: /* CRC error */ 932 case MAX3421_HRSL_BABBLE: /* device talked too long */ 933 case MAX3421_HRSL_TIMEOUT: 934 if (max3421_ep->retries++ < USB_MAX_RETRIES) 935 /* retry the packet again in the next frame */ 936 max3421_slow_retransmit(hcd); 937 else { 938 /* Based on ohci.h cc_to_err[]: */ 939 max3421_hcd->urb_done = hrsl_to_error[result_code]; 940 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x", 941 __func__, hrsl); 942 } 943 break; 944 945 case MAX3421_HRSL_STALL: 946 dev_dbg(&spi->dev, "%s: unexpected error HRSL=0x%02x", 947 __func__, hrsl); 948 max3421_hcd->urb_done = hrsl_to_error[result_code]; 949 break; 950 951 case MAX3421_HRSL_NAK: 952 /* 953 * Device wasn't ready for data or has no data 954 * available: retry the packet again. 955 */ 956 if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) { 957 max3421_next_transfer(hcd, 1); 958 switch_sndfifo = 0; 959 } else 960 max3421_slow_retransmit(hcd); 961 break; 962 } 963 if (switch_sndfifo) 964 spi_wr8(hcd, MAX3421_REG_SNDBC, 0); 965 } 966 967 /* 968 * Caller must NOT hold HCD spinlock. 969 */ 970 static int 971 max3421_transfer_in_done(struct usb_hcd *hcd, struct urb *urb) 972 { 973 struct spi_device *spi = to_spi_device(hcd->self.controller); 974 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 975 u32 max_packet; 976 977 if (urb->actual_length >= urb->transfer_buffer_length) 978 return 1; /* read is complete, so we're done */ 979 980 /* 981 * USB 2.0 Section 5.3.2 Pipes: packets must be full size 982 * except for last one. 983 */ 984 max_packet = usb_maxpacket(urb->dev, urb->pipe, 0); 985 if (max_packet > MAX3421_FIFO_SIZE) { 986 /* 987 * We do not support isochronous transfers at this 988 * time... 989 */ 990 dev_err(&spi->dev, 991 "%s: packet-size of %u too big (limit is %u bytes)", 992 __func__, max_packet, MAX3421_FIFO_SIZE); 993 return -EINVAL; 994 } 995 996 if (max3421_hcd->curr_len < max_packet) { 997 if (urb->transfer_flags & URB_SHORT_NOT_OK) { 998 /* 999 * remaining > 0 and received an 1000 * unexpected partial packet -> 1001 * error 1002 */ 1003 return -EREMOTEIO; 1004 } else 1005 /* short read, but it's OK */ 1006 return 1; 1007 } 1008 return 0; /* not done */ 1009 } 1010 1011 /* 1012 * Caller must NOT hold HCD spinlock. 1013 */ 1014 static int 1015 max3421_transfer_out_done(struct usb_hcd *hcd, struct urb *urb) 1016 { 1017 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1018 1019 urb->actual_length += max3421_hcd->curr_len; 1020 if (urb->actual_length < urb->transfer_buffer_length) 1021 return 0; 1022 if (urb->transfer_flags & URB_ZERO_PACKET) { 1023 /* 1024 * Some hardware needs a zero-size packet at the end 1025 * of a bulk-out transfer if the last transfer was a 1026 * full-sized packet (i.e., such hardware use < 1027 * max_packet as an indicator that the end of the 1028 * packet has been reached). 1029 */ 1030 u32 max_packet = usb_maxpacket(urb->dev, urb->pipe, 1); 1031 1032 if (max3421_hcd->curr_len == max_packet) 1033 return 0; 1034 } 1035 return 1; 1036 } 1037 1038 /* 1039 * Caller must NOT hold HCD spinlock. 1040 */ 1041 static void 1042 max3421_host_transfer_done(struct usb_hcd *hcd) 1043 { 1044 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1045 struct urb *urb = max3421_hcd->curr_urb; 1046 struct max3421_ep *max3421_ep; 1047 u8 result_code, hrsl; 1048 int urb_done = 0; 1049 1050 max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) | 1051 BIT(MAX3421_HI_RCVDAV_BIT)); 1052 1053 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); 1054 result_code = hrsl & MAX3421_HRSL_RESULT_MASK; 1055 1056 #ifdef DEBUG 1057 ++max3421_hcd->err_stat[result_code]; 1058 #endif 1059 1060 max3421_ep = urb->ep->hcpriv; 1061 1062 if (unlikely(result_code != MAX3421_HRSL_OK)) { 1063 max3421_handle_error(hcd, hrsl); 1064 return; 1065 } 1066 1067 max3421_ep->naks = 0; 1068 max3421_ep->retries = 0; 1069 switch (max3421_ep->pkt_state) { 1070 1071 case PKT_STATE_SETUP: 1072 if (urb->transfer_buffer_length > 0) 1073 max3421_ep->pkt_state = PKT_STATE_TRANSFER; 1074 else 1075 max3421_ep->pkt_state = PKT_STATE_TERMINATE; 1076 break; 1077 1078 case PKT_STATE_TRANSFER: 1079 if (usb_urb_dir_in(urb)) 1080 urb_done = max3421_transfer_in_done(hcd, urb); 1081 else 1082 urb_done = max3421_transfer_out_done(hcd, urb); 1083 if (urb_done > 0 && usb_pipetype(urb->pipe) == PIPE_CONTROL) { 1084 /* 1085 * We aren't really done - we still need to 1086 * terminate the control transfer: 1087 */ 1088 max3421_hcd->urb_done = urb_done = 0; 1089 max3421_ep->pkt_state = PKT_STATE_TERMINATE; 1090 } 1091 break; 1092 1093 case PKT_STATE_TERMINATE: 1094 urb_done = 1; 1095 break; 1096 } 1097 1098 if (urb_done) 1099 max3421_hcd->urb_done = urb_done; 1100 else 1101 max3421_next_transfer(hcd, 0); 1102 } 1103 1104 /* 1105 * Caller must NOT hold HCD spinlock. 1106 */ 1107 static void 1108 max3421_detect_conn(struct usb_hcd *hcd) 1109 { 1110 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1111 unsigned int jk, have_conn = 0; 1112 u32 old_port_status, chg; 1113 unsigned long flags; 1114 u8 hrsl, mode; 1115 1116 hrsl = spi_rd8(hcd, MAX3421_REG_HRSL); 1117 1118 jk = ((((hrsl >> MAX3421_HRSL_JSTATUS_BIT) & 1) << 0) | 1119 (((hrsl >> MAX3421_HRSL_KSTATUS_BIT) & 1) << 1)); 1120 1121 mode = max3421_hcd->mode; 1122 1123 switch (jk) { 1124 case 0x0: /* SE0: disconnect */ 1125 /* 1126 * Turn off SOFKAENAB bit to avoid getting interrupt 1127 * every milli-second: 1128 */ 1129 mode &= ~BIT(MAX3421_MODE_SOFKAENAB_BIT); 1130 break; 1131 1132 case 0x1: /* J=0,K=1: low-speed (in full-speed or vice versa) */ 1133 case 0x2: /* J=1,K=0: full-speed (in full-speed or vice versa) */ 1134 if (jk == 0x2) 1135 /* need to switch to the other speed: */ 1136 mode ^= BIT(MAX3421_MODE_LOWSPEED_BIT); 1137 /* turn on SOFKAENAB bit: */ 1138 mode |= BIT(MAX3421_MODE_SOFKAENAB_BIT); 1139 have_conn = 1; 1140 break; 1141 1142 case 0x3: /* illegal */ 1143 break; 1144 } 1145 1146 max3421_hcd->mode = mode; 1147 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode); 1148 1149 spin_lock_irqsave(&max3421_hcd->lock, flags); 1150 old_port_status = max3421_hcd->port_status; 1151 if (have_conn) 1152 max3421_hcd->port_status |= USB_PORT_STAT_CONNECTION; 1153 else 1154 max3421_hcd->port_status &= ~USB_PORT_STAT_CONNECTION; 1155 if (mode & BIT(MAX3421_MODE_LOWSPEED_BIT)) 1156 max3421_hcd->port_status |= USB_PORT_STAT_LOW_SPEED; 1157 else 1158 max3421_hcd->port_status &= ~USB_PORT_STAT_LOW_SPEED; 1159 chg = (old_port_status ^ max3421_hcd->port_status); 1160 max3421_hcd->port_status |= chg << 16; 1161 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1162 } 1163 1164 static irqreturn_t 1165 max3421_irq_handler(int irq, void *dev_id) 1166 { 1167 struct usb_hcd *hcd = dev_id; 1168 struct spi_device *spi = to_spi_device(hcd->self.controller); 1169 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1170 1171 if (max3421_hcd->spi_thread && 1172 max3421_hcd->spi_thread->state != TASK_RUNNING) 1173 wake_up_process(max3421_hcd->spi_thread); 1174 if (!test_and_set_bit(ENABLE_IRQ, &max3421_hcd->todo)) 1175 disable_irq_nosync(spi->irq); 1176 return IRQ_HANDLED; 1177 } 1178 1179 #ifdef DEBUG 1180 1181 static void 1182 dump_eps(struct usb_hcd *hcd) 1183 { 1184 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1185 struct max3421_ep *max3421_ep; 1186 struct usb_host_endpoint *ep; 1187 struct list_head *pos, *upos; 1188 char ubuf[512], *dp, *end; 1189 unsigned long flags; 1190 struct urb *urb; 1191 int epnum, ret; 1192 1193 spin_lock_irqsave(&max3421_hcd->lock, flags); 1194 list_for_each(pos, &max3421_hcd->ep_list) { 1195 max3421_ep = container_of(pos, struct max3421_ep, ep_list); 1196 ep = max3421_ep->ep; 1197 1198 dp = ubuf; 1199 end = dp + sizeof(ubuf); 1200 *dp = '\0'; 1201 list_for_each(upos, &ep->urb_list) { 1202 urb = container_of(upos, struct urb, urb_list); 1203 ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb, 1204 usb_pipetype(urb->pipe), 1205 usb_urb_dir_in(urb) ? "IN" : "OUT", 1206 urb->actual_length, 1207 urb->transfer_buffer_length); 1208 if (ret < 0 || ret >= end - dp) 1209 break; /* error or buffer full */ 1210 dp += ret; 1211 } 1212 1213 epnum = usb_endpoint_num(&ep->desc); 1214 pr_info("EP%0u %u lst %04u rtr %u nak %6u rxmt %u: %s\n", 1215 epnum, max3421_ep->pkt_state, max3421_ep->last_active, 1216 max3421_ep->retries, max3421_ep->naks, 1217 max3421_ep->retransmit, ubuf); 1218 } 1219 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1220 } 1221 1222 #endif /* DEBUG */ 1223 1224 /* Return zero if no work was performed, 1 otherwise. */ 1225 static int 1226 max3421_handle_irqs(struct usb_hcd *hcd) 1227 { 1228 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1229 u32 chg, old_port_status; 1230 unsigned long flags; 1231 u8 hirq; 1232 1233 /* 1234 * Read and ack pending interrupts (CPU must never 1235 * clear SNDBAV directly and RCVDAV must be cleared by 1236 * max3421_recv_data_available()!): 1237 */ 1238 hirq = spi_rd8(hcd, MAX3421_REG_HIRQ); 1239 hirq &= max3421_hcd->hien; 1240 if (!hirq) 1241 return 0; 1242 1243 spi_wr8(hcd, MAX3421_REG_HIRQ, 1244 hirq & ~(BIT(MAX3421_HI_SNDBAV_BIT) | 1245 BIT(MAX3421_HI_RCVDAV_BIT))); 1246 1247 if (hirq & BIT(MAX3421_HI_FRAME_BIT)) { 1248 max3421_hcd->frame_number = ((max3421_hcd->frame_number + 1) 1249 & USB_MAX_FRAME_NUMBER); 1250 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC; 1251 } 1252 1253 if (hirq & BIT(MAX3421_HI_RCVDAV_BIT)) 1254 max3421_recv_data_available(hcd); 1255 1256 if (hirq & BIT(MAX3421_HI_HXFRDN_BIT)) 1257 max3421_host_transfer_done(hcd); 1258 1259 if (hirq & BIT(MAX3421_HI_CONDET_BIT)) 1260 max3421_detect_conn(hcd); 1261 1262 /* 1263 * Now process interrupts that may affect HCD state 1264 * other than the end-points: 1265 */ 1266 spin_lock_irqsave(&max3421_hcd->lock, flags); 1267 1268 old_port_status = max3421_hcd->port_status; 1269 if (hirq & BIT(MAX3421_HI_BUSEVENT_BIT)) { 1270 if (max3421_hcd->port_status & USB_PORT_STAT_RESET) { 1271 /* BUSEVENT due to completion of Bus Reset */ 1272 max3421_hcd->port_status &= ~USB_PORT_STAT_RESET; 1273 max3421_hcd->port_status |= USB_PORT_STAT_ENABLE; 1274 } else { 1275 /* BUSEVENT due to completion of Bus Resume */ 1276 pr_info("%s: BUSEVENT Bus Resume Done\n", __func__); 1277 } 1278 } 1279 if (hirq & BIT(MAX3421_HI_RWU_BIT)) 1280 pr_info("%s: RWU\n", __func__); 1281 if (hirq & BIT(MAX3421_HI_SUSDN_BIT)) 1282 pr_info("%s: SUSDN\n", __func__); 1283 1284 chg = (old_port_status ^ max3421_hcd->port_status); 1285 max3421_hcd->port_status |= chg << 16; 1286 1287 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1288 1289 #ifdef DEBUG 1290 { 1291 static unsigned long last_time; 1292 char sbuf[16 * 16], *dp, *end; 1293 int i; 1294 1295 if (time_after(jiffies, last_time + 5*HZ)) { 1296 dp = sbuf; 1297 end = sbuf + sizeof(sbuf); 1298 *dp = '\0'; 1299 for (i = 0; i < 16; ++i) { 1300 int ret = snprintf(dp, end - dp, " %lu", 1301 max3421_hcd->err_stat[i]); 1302 if (ret < 0 || ret >= end - dp) 1303 break; /* error or buffer full */ 1304 dp += ret; 1305 } 1306 pr_info("%s: hrsl_stats %s\n", __func__, sbuf); 1307 memset(max3421_hcd->err_stat, 0, 1308 sizeof(max3421_hcd->err_stat)); 1309 last_time = jiffies; 1310 1311 dump_eps(hcd); 1312 } 1313 } 1314 #endif 1315 return 1; 1316 } 1317 1318 static int 1319 max3421_reset_hcd(struct usb_hcd *hcd) 1320 { 1321 struct spi_device *spi = to_spi_device(hcd->self.controller); 1322 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1323 int timeout; 1324 1325 /* perform a chip reset and wait for OSCIRQ signal to appear: */ 1326 spi_wr8(hcd, MAX3421_REG_USBCTL, BIT(MAX3421_USBCTL_CHIPRES_BIT)); 1327 /* clear reset: */ 1328 spi_wr8(hcd, MAX3421_REG_USBCTL, 0); 1329 timeout = 1000; 1330 while (1) { 1331 if (spi_rd8(hcd, MAX3421_REG_USBIRQ) 1332 & BIT(MAX3421_USBIRQ_OSCOKIRQ_BIT)) 1333 break; 1334 if (--timeout < 0) { 1335 dev_err(&spi->dev, 1336 "timed out waiting for oscillator OK signal"); 1337 return 1; 1338 } 1339 cond_resched(); 1340 } 1341 1342 /* 1343 * Turn on host mode, automatic generation of SOF packets, and 1344 * enable pull-down registers on DM/DP: 1345 */ 1346 max3421_hcd->mode = (BIT(MAX3421_MODE_HOST_BIT) | 1347 BIT(MAX3421_MODE_SOFKAENAB_BIT) | 1348 BIT(MAX3421_MODE_DMPULLDN_BIT) | 1349 BIT(MAX3421_MODE_DPPULLDN_BIT)); 1350 spi_wr8(hcd, MAX3421_REG_MODE, max3421_hcd->mode); 1351 1352 /* reset frame-number: */ 1353 max3421_hcd->frame_number = USB_MAX_FRAME_NUMBER; 1354 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_FRMRST_BIT)); 1355 1356 /* sample the state of the D+ and D- lines */ 1357 spi_wr8(hcd, MAX3421_REG_HCTL, BIT(MAX3421_HCTL_SAMPLEBUS_BIT)); 1358 max3421_detect_conn(hcd); 1359 1360 /* enable frame, connection-detected, and bus-event interrupts: */ 1361 max3421_hcd->hien = (BIT(MAX3421_HI_FRAME_BIT) | 1362 BIT(MAX3421_HI_CONDET_BIT) | 1363 BIT(MAX3421_HI_BUSEVENT_BIT)); 1364 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien); 1365 1366 /* enable interrupts: */ 1367 spi_wr8(hcd, MAX3421_REG_CPUCTL, BIT(MAX3421_CPUCTL_IE_BIT)); 1368 return 1; 1369 } 1370 1371 static int 1372 max3421_urb_done(struct usb_hcd *hcd) 1373 { 1374 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1375 unsigned long flags; 1376 struct urb *urb; 1377 int status; 1378 1379 status = max3421_hcd->urb_done; 1380 max3421_hcd->urb_done = 0; 1381 if (status > 0) 1382 status = 0; 1383 urb = max3421_hcd->curr_urb; 1384 if (urb) { 1385 max3421_hcd->curr_urb = NULL; 1386 spin_lock_irqsave(&max3421_hcd->lock, flags); 1387 usb_hcd_unlink_urb_from_ep(hcd, urb); 1388 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1389 1390 /* must be called without the HCD spinlock: */ 1391 usb_hcd_giveback_urb(hcd, urb, status); 1392 } 1393 return 1; 1394 } 1395 1396 static int 1397 max3421_spi_thread(void *dev_id) 1398 { 1399 struct usb_hcd *hcd = dev_id; 1400 struct spi_device *spi = to_spi_device(hcd->self.controller); 1401 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1402 int i, i_worked = 1; 1403 1404 /* set full-duplex SPI mode, low-active interrupt pin: */ 1405 spi_wr8(hcd, MAX3421_REG_PINCTL, 1406 (BIT(MAX3421_PINCTL_FDUPSPI_BIT) | /* full-duplex */ 1407 BIT(MAX3421_PINCTL_INTLEVEL_BIT))); /* low-active irq */ 1408 1409 while (!kthread_should_stop()) { 1410 max3421_hcd->rev = spi_rd8(hcd, MAX3421_REG_REVISION); 1411 if (max3421_hcd->rev == 0x12 || max3421_hcd->rev == 0x13) 1412 break; 1413 dev_err(&spi->dev, "bad rev 0x%02x", max3421_hcd->rev); 1414 msleep(10000); 1415 } 1416 dev_info(&spi->dev, "rev 0x%x, SPI clk %dHz, bpw %u, irq %d\n", 1417 max3421_hcd->rev, spi->max_speed_hz, spi->bits_per_word, 1418 spi->irq); 1419 1420 while (!kthread_should_stop()) { 1421 if (!i_worked) { 1422 /* 1423 * We'll be waiting for wakeups from the hard 1424 * interrupt handler, so now is a good time to 1425 * sync our hien with the chip: 1426 */ 1427 spi_wr8(hcd, MAX3421_REG_HIEN, max3421_hcd->hien); 1428 1429 set_current_state(TASK_INTERRUPTIBLE); 1430 if (test_and_clear_bit(ENABLE_IRQ, &max3421_hcd->todo)) 1431 enable_irq(spi->irq); 1432 schedule(); 1433 __set_current_state(TASK_RUNNING); 1434 } 1435 1436 i_worked = 0; 1437 1438 if (max3421_hcd->urb_done) 1439 i_worked |= max3421_urb_done(hcd); 1440 else if (max3421_handle_irqs(hcd)) 1441 i_worked = 1; 1442 else if (!max3421_hcd->curr_urb) 1443 i_worked |= max3421_select_and_start_urb(hcd); 1444 1445 if (test_and_clear_bit(RESET_HCD, &max3421_hcd->todo)) 1446 /* reset the HCD: */ 1447 i_worked |= max3421_reset_hcd(hcd); 1448 if (test_and_clear_bit(RESET_PORT, &max3421_hcd->todo)) { 1449 /* perform a USB bus reset: */ 1450 spi_wr8(hcd, MAX3421_REG_HCTL, 1451 BIT(MAX3421_HCTL_BUSRST_BIT)); 1452 i_worked = 1; 1453 } 1454 if (test_and_clear_bit(CHECK_UNLINK, &max3421_hcd->todo)) 1455 i_worked |= max3421_check_unlink(hcd); 1456 if (test_and_clear_bit(IOPIN_UPDATE, &max3421_hcd->todo)) { 1457 /* 1458 * IOPINS1/IOPINS2 do not auto-increment, so we can't 1459 * use spi_wr_buf(). 1460 */ 1461 for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) { 1462 u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1); 1463 1464 val = ((val & 0xf0) | 1465 (max3421_hcd->iopins[i] & 0x0f)); 1466 spi_wr8(hcd, MAX3421_REG_IOPINS1 + i, val); 1467 max3421_hcd->iopins[i] = val; 1468 } 1469 i_worked = 1; 1470 } 1471 } 1472 set_current_state(TASK_RUNNING); 1473 dev_info(&spi->dev, "SPI thread exiting"); 1474 return 0; 1475 } 1476 1477 static int 1478 max3421_reset_port(struct usb_hcd *hcd) 1479 { 1480 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1481 1482 max3421_hcd->port_status &= ~(USB_PORT_STAT_ENABLE | 1483 USB_PORT_STAT_LOW_SPEED); 1484 max3421_hcd->port_status |= USB_PORT_STAT_RESET; 1485 set_bit(RESET_PORT, &max3421_hcd->todo); 1486 wake_up_process(max3421_hcd->spi_thread); 1487 return 0; 1488 } 1489 1490 static int 1491 max3421_reset(struct usb_hcd *hcd) 1492 { 1493 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1494 1495 hcd->self.sg_tablesize = 0; 1496 hcd->speed = HCD_USB2; 1497 hcd->self.root_hub->speed = USB_SPEED_FULL; 1498 set_bit(RESET_HCD, &max3421_hcd->todo); 1499 wake_up_process(max3421_hcd->spi_thread); 1500 return 0; 1501 } 1502 1503 static int 1504 max3421_start(struct usb_hcd *hcd) 1505 { 1506 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1507 1508 spin_lock_init(&max3421_hcd->lock); 1509 max3421_hcd->rh_state = MAX3421_RH_RUNNING; 1510 1511 INIT_LIST_HEAD(&max3421_hcd->ep_list); 1512 1513 hcd->power_budget = POWER_BUDGET; 1514 hcd->state = HC_STATE_RUNNING; 1515 hcd->uses_new_polling = 1; 1516 return 0; 1517 } 1518 1519 static void 1520 max3421_stop(struct usb_hcd *hcd) 1521 { 1522 } 1523 1524 static int 1525 max3421_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) 1526 { 1527 struct spi_device *spi = to_spi_device(hcd->self.controller); 1528 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1529 struct max3421_ep *max3421_ep; 1530 unsigned long flags; 1531 int retval; 1532 1533 switch (usb_pipetype(urb->pipe)) { 1534 case PIPE_INTERRUPT: 1535 case PIPE_ISOCHRONOUS: 1536 if (urb->interval < 0) { 1537 dev_err(&spi->dev, 1538 "%s: interval=%d for intr-/iso-pipe; expected > 0\n", 1539 __func__, urb->interval); 1540 return -EINVAL; 1541 } 1542 default: 1543 break; 1544 } 1545 1546 spin_lock_irqsave(&max3421_hcd->lock, flags); 1547 1548 max3421_ep = urb->ep->hcpriv; 1549 if (!max3421_ep) { 1550 /* gets freed in max3421_endpoint_disable: */ 1551 max3421_ep = kzalloc(sizeof(struct max3421_ep), GFP_ATOMIC); 1552 if (!max3421_ep) { 1553 retval = -ENOMEM; 1554 goto out; 1555 } 1556 max3421_ep->ep = urb->ep; 1557 max3421_ep->last_active = max3421_hcd->frame_number; 1558 urb->ep->hcpriv = max3421_ep; 1559 1560 list_add_tail(&max3421_ep->ep_list, &max3421_hcd->ep_list); 1561 } 1562 1563 retval = usb_hcd_link_urb_to_ep(hcd, urb); 1564 if (retval == 0) { 1565 /* Since we added to the queue, restart scheduling: */ 1566 max3421_hcd->sched_pass = SCHED_PASS_PERIODIC; 1567 wake_up_process(max3421_hcd->spi_thread); 1568 } 1569 1570 out: 1571 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1572 return retval; 1573 } 1574 1575 static int 1576 max3421_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) 1577 { 1578 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1579 unsigned long flags; 1580 int retval; 1581 1582 spin_lock_irqsave(&max3421_hcd->lock, flags); 1583 1584 /* 1585 * This will set urb->unlinked which in turn causes the entry 1586 * to be dropped at the next opportunity. 1587 */ 1588 retval = usb_hcd_check_unlink_urb(hcd, urb, status); 1589 if (retval == 0) { 1590 set_bit(CHECK_UNLINK, &max3421_hcd->todo); 1591 wake_up_process(max3421_hcd->spi_thread); 1592 } 1593 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1594 return retval; 1595 } 1596 1597 static void 1598 max3421_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) 1599 { 1600 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1601 unsigned long flags; 1602 1603 spin_lock_irqsave(&max3421_hcd->lock, flags); 1604 1605 if (ep->hcpriv) { 1606 struct max3421_ep *max3421_ep = ep->hcpriv; 1607 1608 /* remove myself from the ep_list: */ 1609 if (!list_empty(&max3421_ep->ep_list)) 1610 list_del(&max3421_ep->ep_list); 1611 kfree(max3421_ep); 1612 ep->hcpriv = NULL; 1613 } 1614 1615 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1616 } 1617 1618 static int 1619 max3421_get_frame_number(struct usb_hcd *hcd) 1620 { 1621 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1622 return max3421_hcd->frame_number; 1623 } 1624 1625 /* 1626 * Should return a non-zero value when any port is undergoing a resume 1627 * transition while the root hub is suspended. 1628 */ 1629 static int 1630 max3421_hub_status_data(struct usb_hcd *hcd, char *buf) 1631 { 1632 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1633 unsigned long flags; 1634 int retval = 0; 1635 1636 spin_lock_irqsave(&max3421_hcd->lock, flags); 1637 if (!HCD_HW_ACCESSIBLE(hcd)) 1638 goto done; 1639 1640 *buf = 0; 1641 if ((max3421_hcd->port_status & PORT_C_MASK) != 0) { 1642 *buf = (1 << 1); /* a hub over-current condition exists */ 1643 dev_dbg(hcd->self.controller, 1644 "port status 0x%08x has changes\n", 1645 max3421_hcd->port_status); 1646 retval = 1; 1647 if (max3421_hcd->rh_state == MAX3421_RH_SUSPENDED) 1648 usb_hcd_resume_root_hub(hcd); 1649 } 1650 done: 1651 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1652 return retval; 1653 } 1654 1655 static inline void 1656 hub_descriptor(struct usb_hub_descriptor *desc) 1657 { 1658 memset(desc, 0, sizeof(*desc)); 1659 /* 1660 * See Table 11-13: Hub Descriptor in USB 2.0 spec. 1661 */ 1662 desc->bDescriptorType = USB_DT_HUB; /* hub descriptor */ 1663 desc->bDescLength = 9; 1664 desc->wHubCharacteristics = cpu_to_le16(HUB_CHAR_INDV_PORT_LPSM | 1665 HUB_CHAR_COMMON_OCPM); 1666 desc->bNbrPorts = 1; 1667 } 1668 1669 /* 1670 * Set the MAX3421E general-purpose output with number PIN_NUMBER to 1671 * VALUE (0 or 1). PIN_NUMBER may be in the range from 1-8. For 1672 * any other value, this function acts as a no-op. 1673 */ 1674 static void 1675 max3421_gpout_set_value(struct usb_hcd *hcd, u8 pin_number, u8 value) 1676 { 1677 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1678 u8 mask, idx; 1679 1680 --pin_number; 1681 if (pin_number > 7) 1682 return; 1683 1684 mask = 1u << pin_number; 1685 idx = pin_number / 4; 1686 1687 if (value) 1688 max3421_hcd->iopins[idx] |= mask; 1689 else 1690 max3421_hcd->iopins[idx] &= ~mask; 1691 set_bit(IOPIN_UPDATE, &max3421_hcd->todo); 1692 wake_up_process(max3421_hcd->spi_thread); 1693 } 1694 1695 static int 1696 max3421_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index, 1697 char *buf, u16 length) 1698 { 1699 struct spi_device *spi = to_spi_device(hcd->self.controller); 1700 struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd); 1701 struct max3421_hcd_platform_data *pdata; 1702 unsigned long flags; 1703 int retval = 0; 1704 1705 spin_lock_irqsave(&max3421_hcd->lock, flags); 1706 1707 pdata = spi->dev.platform_data; 1708 1709 switch (type_req) { 1710 case ClearHubFeature: 1711 break; 1712 case ClearPortFeature: 1713 switch (value) { 1714 case USB_PORT_FEAT_SUSPEND: 1715 break; 1716 case USB_PORT_FEAT_POWER: 1717 dev_dbg(hcd->self.controller, "power-off\n"); 1718 max3421_gpout_set_value(hcd, pdata->vbus_gpout, 1719 !pdata->vbus_active_level); 1720 /* FALLS THROUGH */ 1721 default: 1722 max3421_hcd->port_status &= ~(1 << value); 1723 } 1724 break; 1725 case GetHubDescriptor: 1726 hub_descriptor((struct usb_hub_descriptor *) buf); 1727 break; 1728 1729 case DeviceRequest | USB_REQ_GET_DESCRIPTOR: 1730 case GetPortErrorCount: 1731 case SetHubDepth: 1732 /* USB3 only */ 1733 goto error; 1734 1735 case GetHubStatus: 1736 *(__le32 *) buf = cpu_to_le32(0); 1737 break; 1738 1739 case GetPortStatus: 1740 if (index != 1) { 1741 retval = -EPIPE; 1742 goto error; 1743 } 1744 ((__le16 *) buf)[0] = cpu_to_le16(max3421_hcd->port_status); 1745 ((__le16 *) buf)[1] = 1746 cpu_to_le16(max3421_hcd->port_status >> 16); 1747 break; 1748 1749 case SetHubFeature: 1750 retval = -EPIPE; 1751 break; 1752 1753 case SetPortFeature: 1754 switch (value) { 1755 case USB_PORT_FEAT_LINK_STATE: 1756 case USB_PORT_FEAT_U1_TIMEOUT: 1757 case USB_PORT_FEAT_U2_TIMEOUT: 1758 case USB_PORT_FEAT_BH_PORT_RESET: 1759 goto error; 1760 case USB_PORT_FEAT_SUSPEND: 1761 if (max3421_hcd->active) 1762 max3421_hcd->port_status |= 1763 USB_PORT_STAT_SUSPEND; 1764 break; 1765 case USB_PORT_FEAT_POWER: 1766 dev_dbg(hcd->self.controller, "power-on\n"); 1767 max3421_hcd->port_status |= USB_PORT_STAT_POWER; 1768 max3421_gpout_set_value(hcd, pdata->vbus_gpout, 1769 pdata->vbus_active_level); 1770 break; 1771 case USB_PORT_FEAT_RESET: 1772 max3421_reset_port(hcd); 1773 /* FALLS THROUGH */ 1774 default: 1775 if ((max3421_hcd->port_status & USB_PORT_STAT_POWER) 1776 != 0) 1777 max3421_hcd->port_status |= (1 << value); 1778 } 1779 break; 1780 1781 default: 1782 dev_dbg(hcd->self.controller, 1783 "hub control req%04x v%04x i%04x l%d\n", 1784 type_req, value, index, length); 1785 error: /* "protocol stall" on error */ 1786 retval = -EPIPE; 1787 } 1788 1789 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1790 return retval; 1791 } 1792 1793 static int 1794 max3421_bus_suspend(struct usb_hcd *hcd) 1795 { 1796 return -1; 1797 } 1798 1799 static int 1800 max3421_bus_resume(struct usb_hcd *hcd) 1801 { 1802 return -1; 1803 } 1804 1805 /* 1806 * The SPI driver already takes care of DMA-mapping/unmapping, so no 1807 * reason to do it twice. 1808 */ 1809 static int 1810 max3421_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) 1811 { 1812 return 0; 1813 } 1814 1815 static void 1816 max3421_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) 1817 { 1818 } 1819 1820 static struct hc_driver max3421_hcd_desc = { 1821 .description = "max3421", 1822 .product_desc = DRIVER_DESC, 1823 .hcd_priv_size = sizeof(struct max3421_hcd), 1824 .flags = HCD_USB11, 1825 .reset = max3421_reset, 1826 .start = max3421_start, 1827 .stop = max3421_stop, 1828 .get_frame_number = max3421_get_frame_number, 1829 .urb_enqueue = max3421_urb_enqueue, 1830 .urb_dequeue = max3421_urb_dequeue, 1831 .map_urb_for_dma = max3421_map_urb_for_dma, 1832 .unmap_urb_for_dma = max3421_unmap_urb_for_dma, 1833 .endpoint_disable = max3421_endpoint_disable, 1834 .hub_status_data = max3421_hub_status_data, 1835 .hub_control = max3421_hub_control, 1836 .bus_suspend = max3421_bus_suspend, 1837 .bus_resume = max3421_bus_resume, 1838 }; 1839 1840 static int 1841 max3421_probe(struct spi_device *spi) 1842 { 1843 struct max3421_hcd *max3421_hcd; 1844 struct usb_hcd *hcd = NULL; 1845 int retval = -ENOMEM; 1846 1847 if (spi_setup(spi) < 0) { 1848 dev_err(&spi->dev, "Unable to setup SPI bus"); 1849 return -EFAULT; 1850 } 1851 1852 hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev, 1853 dev_name(&spi->dev)); 1854 if (!hcd) { 1855 dev_err(&spi->dev, "failed to create HCD structure\n"); 1856 goto error; 1857 } 1858 set_bit(HCD_FLAG_POLL_RH, &hcd->flags); 1859 max3421_hcd = hcd_to_max3421(hcd); 1860 max3421_hcd->next = max3421_hcd_list; 1861 max3421_hcd_list = max3421_hcd; 1862 INIT_LIST_HEAD(&max3421_hcd->ep_list); 1863 1864 max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL); 1865 if (!max3421_hcd->tx) { 1866 dev_err(&spi->dev, "failed to kmalloc tx buffer\n"); 1867 goto error; 1868 } 1869 max3421_hcd->rx = kmalloc(sizeof(*max3421_hcd->rx), GFP_KERNEL); 1870 if (!max3421_hcd->rx) { 1871 dev_err(&spi->dev, "failed to kmalloc rx buffer\n"); 1872 goto error; 1873 } 1874 1875 max3421_hcd->spi_thread = kthread_run(max3421_spi_thread, hcd, 1876 "max3421_spi_thread"); 1877 if (max3421_hcd->spi_thread == ERR_PTR(-ENOMEM)) { 1878 dev_err(&spi->dev, 1879 "failed to create SPI thread (out of memory)\n"); 1880 goto error; 1881 } 1882 1883 retval = usb_add_hcd(hcd, 0, 0); 1884 if (retval) { 1885 dev_err(&spi->dev, "failed to add HCD\n"); 1886 goto error; 1887 } 1888 1889 retval = request_irq(spi->irq, max3421_irq_handler, 1890 IRQF_TRIGGER_LOW, "max3421", hcd); 1891 if (retval < 0) { 1892 dev_err(&spi->dev, "failed to request irq %d\n", spi->irq); 1893 goto error; 1894 } 1895 return 0; 1896 1897 error: 1898 if (hcd) { 1899 kfree(max3421_hcd->tx); 1900 kfree(max3421_hcd->rx); 1901 if (max3421_hcd->spi_thread) 1902 kthread_stop(max3421_hcd->spi_thread); 1903 usb_put_hcd(hcd); 1904 } 1905 return retval; 1906 } 1907 1908 static int 1909 max3421_remove(struct spi_device *spi) 1910 { 1911 struct max3421_hcd *max3421_hcd = NULL, **prev; 1912 struct usb_hcd *hcd = NULL; 1913 unsigned long flags; 1914 1915 for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) { 1916 max3421_hcd = *prev; 1917 hcd = max3421_to_hcd(max3421_hcd); 1918 if (hcd->self.controller == &spi->dev) 1919 break; 1920 } 1921 if (!max3421_hcd) { 1922 dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n", 1923 spi); 1924 return -ENODEV; 1925 } 1926 1927 usb_remove_hcd(hcd); 1928 1929 spin_lock_irqsave(&max3421_hcd->lock, flags); 1930 1931 kthread_stop(max3421_hcd->spi_thread); 1932 *prev = max3421_hcd->next; 1933 1934 spin_unlock_irqrestore(&max3421_hcd->lock, flags); 1935 1936 free_irq(spi->irq, hcd); 1937 1938 usb_put_hcd(hcd); 1939 return 0; 1940 } 1941 1942 static struct spi_driver max3421_driver = { 1943 .probe = max3421_probe, 1944 .remove = max3421_remove, 1945 .driver = { 1946 .name = "max3421-hcd", 1947 .owner = THIS_MODULE, 1948 }, 1949 }; 1950 1951 module_spi_driver(max3421_driver); 1952 1953 MODULE_DESCRIPTION(DRIVER_DESC); 1954 MODULE_AUTHOR("David Mosberger <davidm@egauge.net>"); 1955 MODULE_LICENSE("GPL"); 1956