/*
	Copyright (C) 2010 Willow Garage <http://www.willowgarage.com>
	Copyright (C) 2004 - 2010 Ivo van Doorn <IvDoorn@gmail.com>
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
	Module: rt2x00usb
	Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
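/*
 * rt2x00usb_vendor_request - Issue a vendor-specific control request.
 *
 * The request is retried until the overall @timeout (in ms) expires,
 * with each attempt getting half of that budget. Returns 0 on success
 * or a negative errno; on -ENODEV the device is flagged as removed.
 *
 * Usage sketch (hypothetical caller; the request code and offset are
 * illustrative, real values come from the chipset drivers):
 *
 *	__le32 reg;
 *
 *	rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_READ,
 *				 USB_VENDOR_REQUEST_IN, offset, 0,
 *				 &reg, sizeof(reg), REGISTER_TIMEOUT);
 */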
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
			     const u8 request, const u8 requesttype,
			     const u16 offset, const u16 value,
			     void *buffer, const u16 buffer_length,
			     const int timeout)
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	int status;
	unsigned int pipe =
	    (requesttype == USB_VENDOR_REQUEST_IN) ?
	    usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);
	unsigned long expire = jiffies + msecs_to_jiffies(timeout);

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	do {
		status = usb_control_msg(usb_dev, pipe, request, requesttype,
					 value, offset, buffer, buffer_length,
					 timeout / 2);
		if (status >= 0)
			return 0;

		if (status == -ENODEV) {
			/* Device has disappeared. */
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
			break;
		}
	} while (time_before(jiffies, expire));

	rt2x00_err(rt2x00dev,
		   "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n",
		   request, offset, status);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);

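/*
 * rt2x00usb_vendor_req_buff_lock - Vendor request through the CSR cache.
 *
 * The transfer is bounced through the pre-allocated csr.cache buffer,
 * so the USB core never does DMA to or from a caller-provided (and
 * possibly stack-based) buffer. The caller must hold csr_mutex; the
 * BUG_ON below enforces this.
 */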
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
				   const u8 request, const u8 requesttype,
				   const u16 offset, void *buffer,
				   const u16 buffer_length, const int timeout)
{
	int status;

	BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

	/*
	 * Check for Cache availability.
	 */
	if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
		rt2x00_err(rt2x00dev, "CSR cache not available\n");
		return -ENOMEM;
	}

	if (requesttype == USB_VENDOR_REQUEST_OUT)
		memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

	status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
					  offset, 0, rt2x00dev->csr.cache,
					  buffer_length, timeout);

	if (!status && requesttype == USB_VENDOR_REQUEST_IN)
		memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

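/*
 * rt2x00usb_vendor_request_buff - Vendor request for larger buffers.
 *
 * Takes csr_mutex itself and splits the transfer into chunks of at
 * most CSR_CACHE_SIZE bytes, as the loop below shows. The multi-read,
 * multi-write and EEPROM helpers are built on top of this.
 */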
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
				  const u8 request, const u8 requesttype,
				  const u16 offset, void *buffer,
				  const u16 buffer_length)
{
	int status = 0;
	unsigned char *tb;
	u16 off, len, bsize;

	mutex_lock(&rt2x00dev->csr_mutex);

	tb  = (unsigned char *)buffer;
	off = offset;
	len = buffer_length;
	while (len && !status) {
		bsize = min_t(u16, CSR_CACHE_SIZE, len);
		status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
							requesttype, off, tb,
							bsize, REGISTER_TIMEOUT);

		tb  += bsize;
		len -= bsize;
		off += bsize;
	}

	mutex_unlock(&rt2x00dev->csr_mutex);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

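/*
 * rt2x00usb_regbusy_read - Poll an indirect register until it goes idle.
 *
 * Note the non-standard return convention: 1 means the busy bit
 * cleared and *reg holds a valid value, 0 means polling timed out (and
 * *reg is set to ~0), -ENODEV means the device is gone.
 */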
int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
			   const unsigned int offset,
			   const struct rt2x00_field32 field,
			   u32 *reg)
{
	unsigned int i;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return -ENODEV;

	for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) {
		rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
		if (!rt2x00_get_field32(*reg, field))
			return 1;
		udelay(REGISTER_BUSY_DELAY);
	}

	rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n",
		   offset, *reg);
	*reg = ~0;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);

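/*
 * Asynchronous register reads: the setup packet (cr) and the result
 * buffer (reg) must stay valid until the urb completes, so both live
 * in a kmalloc'ed rt2x00_async_read_data passed as the urb context.
 * A callback returning true resubmits the urb, so a register can be
 * polled without blocking.
 */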
struct rt2x00_async_read_data {
	__le32 reg;
	struct usb_ctrlrequest cr;
	struct rt2x00_dev *rt2x00dev;
	bool (*callback)(struct rt2x00_dev *, int, u32);
};

static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
	struct rt2x00_async_read_data *rd = urb->context;

	if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
		usb_anchor_urb(urb, rd->rt2x00dev->anchor);
		if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
			usb_unanchor_urb(urb);
			kfree(rd);
		}
	} else {
		kfree(rd);
	}
}

void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset,
				   bool (*callback)(struct rt2x00_dev*, int, u32))
{
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct urb *urb;
	struct rt2x00_async_read_data *rd;

	rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
	if (!rd)
		return;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		kfree(rd);
		return;
	}

	rd->rt2x00dev = rt2x00dev;
	rd->callback = callback;
	rd->cr.bRequestType = USB_VENDOR_REQUEST_IN;
	rd->cr.bRequest = USB_MULTI_READ;
	rd->cr.wValue = 0;
	rd->cr.wIndex = cpu_to_le16(offset);
	rd->cr.wLength = cpu_to_le16(sizeof(u32));

	usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
			     (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
			     rt2x00usb_register_read_async_cb, rd);
	usb_anchor_urb(urb, rt2x00dev->anchor);
	if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
		usb_unanchor_urb(urb);
		kfree(rd);
	}
	usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);

/*
 * TX data handlers.
 */
static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
{
	/*
	 * Even if the transfer to the hardware succeeded, that does
	 * not mean the frame was sent out correctly. It only means the
	 * frame was successfully pushed to the hardware; we have no
	 * way to determine the transmission status right now. (Only
	 * indirectly, by looking at the failed TX counters in the
	 * register.)
	 */
	if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
		rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE);
	else
		rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN);
}

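/*
 * Runs from the txdone_work item: walk each TX queue from Q_INDEX_DONE
 * onwards and complete every entry whose urb has finished, stopping at
 * the first entry that is still owned by the device or has no status
 * pending.
 */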
static void rt2x00usb_work_txdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, txdone_work);
	struct data_queue *queue;
	struct queue_entry *entry;

	tx_queue_for_each(rt2x00dev, queue) {
		while (!rt2x00queue_empty(queue)) {
			entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);

			if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
			    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
				break;

			rt2x00usb_work_txdone_entry(entry);
		}
	}
}

static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;
	/*
	 * Check if the frame was correctly uploaded
	 */
	if (urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
	/*
	 * Report the frame as DMA done
	 */
	rt2x00lib_dmadone(entry);

	if (rt2x00dev->ops->lib->tx_dma_done)
		rt2x00dev->ops->lib->tx_dma_done(entry);
	/*
	 * Schedule the delayed work for reading the TX status
	 * from the device.
	 */
	if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) ||
	    !kfifo_is_empty(&rt2x00dev->txstatus_fifo))
		queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
}

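/*
 * Callback for rt2x00queue_for_each_entry(), where a true return value
 * aborts the iteration; we always return false so that every pending
 * entry in the queue gets kicked.
 */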
static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	u32 length;
	int status;

	if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	/*
	 * USB devices require certain padding at the end of each frame
	 * and urb. Those paddings are not included in skbs. Pass entry
	 * to the driver to determine what the overall length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	status = skb_padto(entry->skb, length);
	if (unlikely(status)) {
		/* TODO: report something more appropriate than IO_FAILED. */
		rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);

		return false;
	}

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, length,
			  rt2x00usb_interrupt_txdone, entry);

	usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(entry_priv->urb);
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}

/*
 * RX data handlers.
 */
static void rt2x00usb_work_rxdone(struct work_struct *work)
{
	struct rt2x00_dev *rt2x00dev =
	    container_of(work, struct rt2x00_dev, rxdone_work);
	struct queue_entry *entry;
	struct skb_frame_desc *skbdesc;
	u8 rxd[32];

	while (!rt2x00queue_empty(rt2x00dev->rx)) {
		entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);

		if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
		    !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
			break;

		/*
		 * Fill in desc fields of the skb descriptor
		 */
		skbdesc = get_skb_frame_desc(entry->skb);
		skbdesc->desc = rxd;
		skbdesc->desc_len = entry->queue->desc_size;

		/*
		 * Send the frame to rt2x00lib for further processing.
		 */
		rt2x00lib_rxdone(entry, GFP_KERNEL);
	}
}

static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
	struct queue_entry *entry = (struct queue_entry *)urb->context;
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;

	if (!test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return;

	/*
	 * Report the frame as DMA done
	 */
	rt2x00lib_dmadone(entry);

	/*
	 * Check if the received data is simply too small
	 * to be actually valid, or if the urb is signaling
	 * a problem.
	 */
	if (urb->actual_length < entry->queue->desc_size || urb->status)
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);

	/*
	 * Schedule the delayed work for reading the RX status
	 * from the device.
	 */
	queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
}

static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	int status;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	rt2x00lib_dmastart(entry);

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, entry->skb->len,
			  rt2x00usb_interrupt_rxdone, entry);

	usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(entry_priv->urb);
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}

void rt2x00usb_kick_queue(struct data_queue *queue)
{
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		if (!rt2x00queue_empty(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX_DONE,
						   Q_INDEX,
						   NULL,
						   rt2x00usb_kick_tx_entry);
		break;
	case QID_RX:
		if (!rt2x00queue_full(queue))
			rt2x00queue_for_each_entry(queue,
						   Q_INDEX,
						   Q_INDEX_DONE,
						   NULL,
						   rt2x00usb_kick_rx_entry);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);

static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;

	if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
		return false;

	usb_kill_urb(entry_priv->urb);

	/*
	 * Kill guardian urb (if required by driver).
	 */
	if ((entry->queue->qid == QID_BEACON) &&
	    (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)))
		usb_kill_urb(bcn_priv->guardian_urb);

	return false;
}

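/*
 * rt2x00usb_flush_queue - Wait for a queue to drain.
 *
 * With @drop set, all pending urbs are killed first. The matching
 * completion worker is then scheduled repeatedly, for at most ten
 * rounds of 10 ms, until the queue reports empty.
 */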
void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
{
	struct work_struct *completion;
	unsigned int i;

	if (drop)
		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
					   rt2x00usb_flush_entry);

	/*
	 * Obtain the queue completion handler
	 */
	switch (queue->qid) {
	case QID_AC_VO:
	case QID_AC_VI:
	case QID_AC_BE:
	case QID_AC_BK:
		completion = &queue->rt2x00dev->txdone_work;
		break;
	case QID_RX:
		completion = &queue->rt2x00dev->rxdone_work;
		break;
	default:
		return;
	}

	for (i = 0; i < 10; i++) {
		/*
		 * Check if the driver is already done, otherwise we
		 * have to sleep a little while to give the driver/hw
		 * the opportunity to complete the interrupt processing
		 * itself.
		 */
		if (rt2x00queue_empty(queue))
			break;

		/*
		 * Schedule the completion handler manually; when this
		 * worker function runs, it should clean up the queue.
		 */
		queue_work(queue->rt2x00dev->workqueue, completion);

		/*
		 * Wait for a little while to give the driver
		 * the opportunity to recover itself.
		 */
		msleep(10);
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);

static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
{
	rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n",
		    queue->qid);

	rt2x00queue_stop_queue(queue);
	rt2x00queue_flush_queue(queue, true);
	rt2x00queue_start_queue(queue);
}

static int rt2x00usb_dma_timeout(struct data_queue *queue)
{
	struct queue_entry *entry;

	entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE);
	return rt2x00queue_dma_timeout(entry);
}

void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		if (!rt2x00queue_empty(queue)) {
			if (rt2x00usb_dma_timeout(queue))
				rt2x00usb_watchdog_tx_dma(queue);
		}
	}
}
EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);

/*
 * Radio handlers
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
				    REGISTER_TIMEOUT);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
	entry->flags = 0;

	if (entry->queue->qid == QID_RX)
		rt2x00usb_kick_rx_entry(entry, NULL);
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);

static void rt2x00usb_assign_endpoint(struct data_queue *queue,
				      struct usb_endpoint_descriptor *ep_desc)
{
	struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
	int pipe;

	queue->usb_endpoint = usb_endpoint_num(ep_desc);

	if (queue->qid == QID_RX) {
		pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
	} else {
		pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
		queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
	}

	if (!queue->usb_maxpacket)
		queue->usb_maxpacket = 1;
}

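/*
 * Endpoint discovery: the first bulk-in endpoint found is assigned to
 * the RX queue, and each bulk-out endpoint is assigned to the next TX
 * queue in turn. TX queues left without an endpoint of their own share
 * the last bulk-out endpoint that was found.
 */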
static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
	struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
	struct usb_host_interface *intf_desc = intf->cur_altsetting;
	struct usb_endpoint_descriptor *ep_desc;
	struct data_queue *queue = rt2x00dev->tx;
	struct usb_endpoint_descriptor *tx_ep_desc = NULL;
	unsigned int i;

	/*
	 * Walk through all available endpoints to search for "bulk in"
	 * and "bulk out" endpoints. When we find such endpoints collect
	 * the information we need from the descriptor and assign it
	 * to the queue.
	 */
	for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
		ep_desc = &intf_desc->endpoint[i].desc;

		if (usb_endpoint_is_bulk_in(ep_desc)) {
			rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
		} else if (usb_endpoint_is_bulk_out(ep_desc) &&
			   (queue != queue_end(rt2x00dev))) {
			rt2x00usb_assign_endpoint(queue, ep_desc);
			queue = queue_next(queue);

			tx_ep_desc = ep_desc;
		}
	}

	/*
	 * At least 1 endpoint for RX and 1 endpoint for TX must be available.
	 */
	if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
		rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
		return -EPIPE;
	}

	/*
	 * Not every TX queue may have a dedicated endpoint. Loop through
	 * all TX queues and copy the endpoint information which we have
	 * gathered from the already assigned endpoints.
	 */
	txall_queue_for_each(rt2x00dev, queue) {
		if (!queue->usb_endpoint)
			rt2x00usb_assign_endpoint(queue, tx_ep_desc);
	}

	return 0;
}

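/*
 * Next to the regular data urb, beacon entries on devices with the
 * REQUIRE_BEACON_GUARD capability need an extra "guardian" urb; both
 * are allocated and freed together below.
 */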
static int rt2x00usb_alloc_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!entry_priv->urb)
			return -ENOMEM;
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
		return 0;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!bcn_priv->guardian_urb)
			return -ENOMEM;
	}

	return 0;
}

static void rt2x00usb_free_entries(struct data_queue *queue)
{
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
	struct queue_entry_priv_usb *entry_priv;
	struct queue_entry_priv_usb_bcn *bcn_priv;
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		entry_priv = queue->entries[i].priv_data;
		usb_kill_urb(entry_priv->urb);
		usb_free_urb(entry_priv->urb);
	}

	/*
	 * If this is not the beacon queue or
	 * no guardian byte was required for the beacon,
	 * then we are done.
	 */
	if (queue->qid != QID_BEACON ||
	    !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))
		return;

	for (i = 0; i < queue->limit; i++) {
		bcn_priv = queue->entries[i].priv_data;
		usb_kill_urb(bcn_priv->guardian_urb);
		usb_free_urb(bcn_priv->guardian_urb);
	}
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	/*
	 * Find endpoints for each queue
	 */
	status = rt2x00usb_find_endpoints(rt2x00dev);
	if (status)
		goto exit;

	/*
	 * Allocate DMA
	 */
	queue_for_each(rt2x00dev, queue) {
		status = rt2x00usb_alloc_entries(queue);
		if (status)
			goto exit;
	}

	return 0;

exit:
	rt2x00usb_uninitialize(rt2x00dev);

	return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	queue_for_each(rt2x00dev, queue)
		rt2x00usb_free_entries(queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rf);
	rt2x00dev->rf = NULL;

	kfree(rt2x00dev->eeprom);
	rt2x00dev->eeprom = NULL;

	kfree(rt2x00dev->csr.cache);
	rt2x00dev->csr.cache = NULL;
}

static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
	rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
	if (!rt2x00dev->csr.cache)
		goto exit;

	rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
	if (!rt2x00dev->eeprom)
		goto exit;

	rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
	if (!rt2x00dev->rf)
		goto exit;

	return 0;

exit:
	rt2x00_probe_err("Failed to allocate registers\n");

	rt2x00usb_free_reg(rt2x00dev);

	return -ENOMEM;
}

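/*
 * Chipset drivers wire this into their struct usb_driver glue; a
 * minimal sketch (driver and ops names are illustrative):
 *
 *	static int example_probe(struct usb_interface *usb_intf,
 *				 const struct usb_device_id *id)
 *	{
 *		return rt2x00usb_probe(usb_intf, &example_ops);
 *	}
 */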
int rt2x00usb_probe(struct usb_interface *usb_intf,
		    const struct rt2x00_ops *ops)
{
	struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
	struct ieee80211_hw *hw;
	struct rt2x00_dev *rt2x00dev;
	int retval;

	usb_dev = usb_get_dev(usb_dev);
	usb_reset_device(usb_dev);

	hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
	if (!hw) {
		rt2x00_probe_err("Failed to allocate hardware\n");
		retval = -ENOMEM;
		goto exit_put_device;
	}

	usb_set_intfdata(usb_intf, hw);

	rt2x00dev = hw->priv;
	rt2x00dev->dev = &usb_intf->dev;
	rt2x00dev->ops = ops;
	rt2x00dev->hw = hw;

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

	INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone);
	INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone);
	hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);

	retval = rt2x00usb_alloc_reg(rt2x00dev);
	if (retval)
		goto exit_free_device;

	/*
	 * Allocate and initialize the urb anchor before probing the
	 * device, so a failure here does not leave an initialized
	 * rt2x00lib device behind. The allocation is device-managed
	 * and needs no explicit cleanup on the error paths.
	 */
	rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
					 sizeof(struct usb_anchor),
					 GFP_KERNEL);
	if (!rt2x00dev->anchor) {
		retval = -ENOMEM;
		goto exit_free_reg;
	}
	init_usb_anchor(rt2x00dev->anchor);

	retval = rt2x00lib_probe_dev(rt2x00dev);
	if (retval)
		goto exit_free_reg;

	return 0;

exit_free_reg:
	rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
	ieee80211_free_hw(hw);

exit_put_device:
	usb_put_dev(usb_dev);

	usb_set_intfdata(usb_intf, NULL);

	return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Free all allocated data.
	 */
	rt2x00lib_remove_dev(rt2x00dev);
	rt2x00usb_free_reg(rt2x00dev);
	ieee80211_free_hw(hw);

	/*
	 * Free the USB device data.
	 */
	usb_set_intfdata(usb_intf, NULL);
	usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_suspend(rt2x00dev, state);
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
	struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");