xref: /openbmc/linux/drivers/usb/core/urb.c (revision a1e58bbd)
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include "hcd.h"

#define to_urb(d) container_of(d, struct urb, kref)

static void urb_destroy(struct kref *kref)
{
	struct urb *urb = to_urb(kref);

	if (urb->transfer_flags & URB_FREE_BUFFER)
		kfree(urb->transfer_buffer);

	kfree(urb);
}

/**
 * usb_init_urb - initializes a urb so that it can be used by a USB driver
 * @urb: pointer to the urb to initialize
 *
 * Initializes a urb so that the USB subsystem can use it properly.
 *
 * If a urb is created with a call to usb_alloc_urb() it is not
 * necessary to call this function.  Only use this if you allocate the
 * space for a struct urb on your own.  If you call this function, be
 * careful when freeing the memory for your urb that it is no longer in
 * use by the USB core.
 *
 * Only use this function if you _really_ understand what you are doing.
 */
void usb_init_urb(struct urb *urb)
{
	if (urb) {
		memset(urb, 0, sizeof(*urb));
		kref_init(&urb->kref);
		INIT_LIST_HEAD(&urb->anchor_list);
	}
}
EXPORT_SYMBOL_GPL(usb_init_urb);
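
/*
 * Example (illustrative sketch, not part of this file): a driver that
 * embeds a struct urb in its own state instead of calling
 * usb_alloc_urb() must initialize it once before first use.  The
 * my_priv/my_setup names below are hypothetical:
 *
 *	struct my_priv {
 *		struct urb	int_urb;	// statically embedded urb
 *	};
 *
 *	static int my_setup(struct my_priv *priv)
 *	{
 *		usb_init_urb(&priv->int_urb);	// zero fields, refcount = 1
 *		return 0;
 *	}
 *
 * The driver then owns the urb's lifetime and must ensure the USB core
 * is finished with it before freeing the containing structure.
 */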

/**
 * usb_alloc_urb - creates a new urb for a USB driver to use
 * @iso_packets: number of iso packets for this urb
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list of
 *	valid options for this.
 *
 * Creates an urb for the USB driver to use, initializes a few internal
 * structures, increments the usage counter, and returns a pointer to it.
 *
 * If no memory is available, NULL is returned.
 *
 * If the driver wants to use this urb for interrupt, control, or bulk
 * endpoints, pass '0' as the number of iso packets.
 *
 * The driver must call usb_free_urb() when it is finished with the urb.
 */
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
	struct urb *urb;

	urb = kmalloc(sizeof(struct urb) +
		iso_packets * sizeof(struct usb_iso_packet_descriptor),
		mem_flags);
	if (!urb) {
		err("alloc_urb: kmalloc failed");
		return NULL;
	}
	usb_init_urb(urb);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_alloc_urb);
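
/*
 * Example (illustrative sketch, not part of this file): allocating urbs
 * for different endpoint types.  Bulk, control, and interrupt urbs take
 * 0 iso packets; an isochronous urb takes the number of packets it will
 * carry:
 *
 *	struct urb *bulk_urb, *iso_urb;
 *
 *	bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
 *	iso_urb = usb_alloc_urb(8, GFP_KERNEL);	// room for 8 iso descriptors
 *	if (!bulk_urb || !iso_urb)
 *		...				// handle allocation failure
 *
 * Each successful usb_alloc_urb() must eventually be balanced by a
 * usb_free_urb() call.
 */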

/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed, that must be
 * done elsewhere.
 */
void usb_free_urb(struct urb *urb)
{
	if (urb)
		kref_put(&urb->kref, urb_destroy);
}
EXPORT_SYMBOL_GPL(usb_free_urb);

/**
 * usb_get_urb - increments the reference count of the urb
 * @urb: pointer to the urb to modify, may be NULL
 *
 * This must be called whenever a urb is transferred from a device driver to a
 * host controller driver.  This allows proper reference counting to happen
 * for urbs.
 *
 * A pointer to the urb with the incremented reference counter is returned.
 */
struct urb *usb_get_urb(struct urb *urb)
{
	if (urb)
		kref_get(&urb->kref);
	return urb;
}
EXPORT_SYMBOL_GPL(usb_get_urb);
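
/*
 * Example (illustrative sketch): holding an extra reference so a urb
 * cannot be freed under you while another context may still drop its
 * own reference:
 *
 *	urb = usb_get_urb(urb);		// reference count goes up
 *	...				// urb is safe to use here
 *	usb_put_urb(urb);		// reference count drops; the last
 *					// put frees the urb
 *
 * Every usb_get_urb() must be balanced by a usb_put_urb() or
 * usb_free_urb(), or the urb will leak.
 */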

/**
 * usb_anchor_urb - anchors an URB while it is processed
 * @urb: pointer to the urb to anchor
 * @anchor: pointer to the anchor
 *
 * This can be called to have access to URBs which are to be executed
 * without bothering to track them individually.
 */
void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
	unsigned long flags;

	spin_lock_irqsave(&anchor->lock, flags);
	usb_get_urb(urb);
	list_add_tail(&urb->anchor_list, &anchor->urb_list);
	urb->anchor = anchor;
	spin_unlock_irqrestore(&anchor->lock, flags);
}
EXPORT_SYMBOL_GPL(usb_anchor_urb);
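
/*
 * Example (illustrative sketch): anchoring urbs at submission time so
 * they can later be cancelled as a group (see usb_kill_anchored_urbs()
 * below).  The priv->anchor field is hypothetical:
 *
 *	init_usb_anchor(&priv->anchor);		// once, at setup time
 *	...
 *	usb_anchor_urb(urb, &priv->anchor);	// the anchor takes a reference
 *	if (usb_submit_urb(urb, GFP_KERNEL))
 *		usb_unanchor_urb(urb);		// undo the anchor on failure
 */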

/**
 * usb_unanchor_urb - unanchors an URB
 * @urb: pointer to the urb to unanchor
 *
 * Call this to stop the system keeping track of this URB.
 */
void usb_unanchor_urb(struct urb *urb)
{
	unsigned long flags;
	struct usb_anchor *anchor;

	if (!urb)
		return;

	anchor = urb->anchor;
	if (!anchor)
		return;

	spin_lock_irqsave(&anchor->lock, flags);
	if (unlikely(anchor != urb->anchor)) {
		/* we've lost the race to another thread */
		spin_unlock_irqrestore(&anchor->lock, flags);
		return;
	}
	urb->anchor = NULL;
	list_del(&urb->anchor_list);
	spin_unlock_irqrestore(&anchor->lock, flags);
	usb_put_urb(urb);
	if (list_empty(&anchor->urb_list))
		wake_up(&anchor->wait);
}
EXPORT_SYMBOL_GPL(usb_unanchor_urb);

/*-------------------------------------------------------------------*/

/**
 * usb_submit_urb - issue an asynchronous transfer request for an endpoint
 * @urb: pointer to the urb describing the request
 * @mem_flags: the type of memory to allocate, see kmalloc() for a list
 *	of valid options for this.
 *
 * This submits a transfer request, and transfers control of the URB
 * describing that request to the USB subsystem.  Request completion will
 * be indicated later, asynchronously, by calling the completion handler.
 * The three types of completion are success, error, and unlink
 * (a software-induced fault, also called "request cancellation").
 *
 * URBs may be submitted in interrupt context.
 *
 * The caller must have correctly initialized the URB before submitting
 * it.  Functions such as usb_fill_bulk_urb() and usb_fill_control_urb() are
 * available to ensure that most fields are correctly initialized, for
 * the particular kind of transfer, although they will not initialize
 * any transfer flags.
 *
 * Successful submissions return 0; otherwise this routine returns a
 * negative error number.  If the submission is successful, the complete()
 * callback from the URB will be called exactly once, when the USB core and
 * Host Controller Driver (HCD) are finished with the URB.  When the completion
 * function is called, control of the URB is returned to the device
 * driver which issued the request.  The completion handler may then
 * immediately free or reuse that URB.
 *
 * With few exceptions, USB device drivers should never access URB fields
 * provided by usbcore or the HCD until its complete() is called.
 * The exceptions relate to periodic transfer scheduling.  For both
 * interrupt and isochronous urbs, as part of successful URB submission
 * urb->interval is modified to reflect the actual transfer period used
 * (normally some power of two units).  And for isochronous urbs,
 * urb->start_frame is modified to reflect when the URB's transfers were
 * scheduled to start.  Not all isochronous transfer scheduling policies
 * will work, but most host controller drivers should easily handle ISO
 * queues going from now until 10-200 msec into the future.
 *
 * For control endpoints, the synchronous usb_control_msg() call is
 * often used (in non-interrupt context) instead of this call.
 * That is often used through convenience wrappers, for the requests
 * that are standardized in the USB 2.0 specification.  For bulk
 * endpoints, a synchronous usb_bulk_msg() call is available.
 *
 * Request Queuing:
 *
 * URBs may be submitted to endpoints before previous ones complete, to
 * minimize the impact of interrupt latencies and system overhead on data
 * throughput.  With that queuing policy, an endpoint's queue would never
 * be empty.  This is required for continuous isochronous data streams,
 * and may also be required for some kinds of interrupt transfers. Such
 * queuing also maximizes bandwidth utilization by letting USB controllers
 * start work on later requests before driver software has finished the
 * completion processing for earlier (successful) requests.
 *
 * As of Linux 2.6, all USB endpoint transfer queues support depths greater
 * than one.  This was previously a HCD-specific behavior, except for ISO
 * transfers.  Non-isochronous endpoint queues are inactive during cleanup
 * after faults (transfer errors or cancellation).
 *
 * Reserved Bandwidth Transfers:
 *
 * Periodic transfers (interrupt or isochronous) are performed repeatedly,
 * using the interval specified in the urb.  Submitting the first urb to
 * the endpoint reserves the bandwidth necessary to make those transfers.
 * If the USB subsystem can't allocate sufficient bandwidth to perform
 * the periodic request, submitting such a periodic request should fail.
 *
 * Device drivers must explicitly request that repetition, by ensuring that
 * some URB is always on the endpoint's queue (except possibly for short
 * periods during completion callbacks).  When there is no longer an urb
 * queued, the endpoint's bandwidth reservation is canceled.  This means
 * drivers can use their completion handlers to ensure they keep bandwidth
 * they need, by reinitializing and resubmitting the just-completed urb
 * until the driver no longer needs that periodic bandwidth.
 *
 * Memory Flags:
 *
 * The general rules for how to decide which mem_flags to use
 * are the same as for kmalloc.  There are four
 * different possible values: GFP_KERNEL, GFP_NOFS, GFP_NOIO and
 * GFP_ATOMIC.
 *
 * GFP_NOFS is not ever used, as it has not been implemented yet.
 *
 * GFP_ATOMIC is used when
 *   (a) you are inside a completion handler, an interrupt, bottom half,
 *       tasklet or timer, or
 *   (b) you are holding a spinlock or rwlock (does not apply to
 *       semaphores), or
 *   (c) current->state != TASK_RUNNING, this is the case only after
 *       you've changed it.
 *
 * GFP_NOIO is used in the block io path and error handling of storage
 * devices.
 *
 * All other situations use GFP_KERNEL.
 *
 * Some more specific rules for mem_flags can be inferred, such as
 *  (1) start_xmit, timeout, and receive methods of network drivers must
 *      use GFP_ATOMIC (they are called with a spinlock held);
 *  (2) queuecommand methods of scsi drivers must use GFP_ATOMIC (also
 *      called with a spinlock held);
 *  (3) If you use a kernel thread with a network driver you must use
 *      GFP_NOIO, unless (b) or (c) apply;
 *  (4) after you have done a down() you can use GFP_KERNEL, unless (b) or (c)
 *      apply or you are in a storage driver's block io path;
 *  (5) USB probe and disconnect can use GFP_KERNEL unless (b) or (c) apply; and
 *  (6) changing firmware on a running storage or net device uses
 *      GFP_NOIO, unless (b) or (c) apply.
 *
 */
int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
	int				xfertype, max;
	struct usb_device		*dev;
	struct usb_host_endpoint	*ep;
	int				is_out;

	if (!urb || urb->hcpriv || !urb->complete)
		return -EINVAL;
	dev = urb->dev;
	if ((!dev) || (dev->state < USB_STATE_DEFAULT))
		return -ENODEV;

	/* For now, get the endpoint from the pipe.  Eventually drivers
	 * will be required to set urb->ep directly and we will eliminate
	 * urb->pipe.
	 */
	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
			[usb_pipeendpoint(urb->pipe)];
	if (!ep)
		return -ENOENT;

	urb->ep = ep;
	urb->status = -EINPROGRESS;
	urb->actual_length = 0;

	/* Lots of sanity checks, so HCDs can rely on clean data
	 * and don't need to duplicate tests
	 */
	xfertype = usb_endpoint_type(&ep->desc);
	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
		struct usb_ctrlrequest *setup =
				(struct usb_ctrlrequest *) urb->setup_packet;

		if (!setup)
			return -ENOEXEC;
		is_out = !(setup->bRequestType & USB_DIR_IN) ||
				!setup->wLength;
	} else {
		is_out = usb_endpoint_dir_out(&ep->desc);
	}

	/* Cache the direction for later use */
	urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) |
			(is_out ? URB_DIR_OUT : URB_DIR_IN);

	if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
			dev->state < USB_STATE_CONFIGURED)
		return -ENODEV;

	max = le16_to_cpu(ep->desc.wMaxPacketSize);
	if (max <= 0) {
		dev_dbg(&dev->dev,
			"bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
			usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
			__func__, max);
		return -EMSGSIZE;
	}

	/* periodic transfers limit size per frame/uframe,
	 * but drivers only control those sizes for ISO.
	 * while we're checking, initialize return status.
	 */
	if (xfertype == USB_ENDPOINT_XFER_ISOC) {
		int	n, len;

		/* "high bandwidth" mode, 1-3 packets/uframe? */
		if (dev->speed == USB_SPEED_HIGH) {
			int	mult = 1 + ((max >> 11) & 0x03);
			max &= 0x07ff;
			max *= mult;
		}

		if (urb->number_of_packets <= 0)
			return -EINVAL;
		for (n = 0; n < urb->number_of_packets; n++) {
			len = urb->iso_frame_desc[n].length;
			if (len < 0 || len > max)
				return -EMSGSIZE;
			urb->iso_frame_desc[n].status = -EXDEV;
			urb->iso_frame_desc[n].actual_length = 0;
		}
	}

	/* the I/O buffer must be mapped/unmapped, except when length=0 */
	if (urb->transfer_buffer_length < 0)
		return -EMSGSIZE;

#ifdef DEBUG
	/* stuff that drivers shouldn't do, but which shouldn't
	 * cause problems in HCDs if they get it wrong.
	 */
	{
	unsigned int	orig_flags = urb->transfer_flags;
	unsigned int	allowed;

	/* enforce simple/standard policy */
	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
			URB_NO_INTERRUPT | URB_DIR_MASK | URB_FREE_BUFFER);
	switch (xfertype) {
	case USB_ENDPOINT_XFER_BULK:
		if (is_out)
			allowed |= URB_ZERO_PACKET;
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_CONTROL:
		allowed |= URB_NO_FSBR;	/* only affects UHCI */
		/* FALLTHROUGH */
	default:			/* all non-iso endpoints */
		if (!is_out)
			allowed |= URB_SHORT_NOT_OK;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		allowed |= URB_ISO_ASAP;
		break;
	}
	urb->transfer_flags &= allowed;

	/* fail if submitter gave bogus flags */
	if (urb->transfer_flags != orig_flags) {
		err("BOGUS urb flags, %x --> %x",
			orig_flags, urb->transfer_flags);
		return -EINVAL;
	}
	}
#endif
	/*
	 * Force periodic transfer intervals to be legal values that are
	 * a power of two (so HCDs don't need to).
	 *
	 * FIXME want bus->{intr,iso}_sched_horizon values here.  Each HC
	 * supports different values... this uses EHCI/UHCI defaults (and
	 * EHCI can use smaller non-default values).
	 */
	switch (xfertype) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		/* too small? */
		if (urb->interval <= 0)
			return -EINVAL;
		/* too big? */
		switch (dev->speed) {
		case USB_SPEED_HIGH:	/* units are microframes */
			/* NOTE usb handles 2^15 */
			if (urb->interval > (1024 * 8))
				urb->interval = 1024 * 8;
			max = 1024 * 8;
			break;
		case USB_SPEED_FULL:	/* units are frames/msec */
		case USB_SPEED_LOW:
			if (xfertype == USB_ENDPOINT_XFER_INT) {
				if (urb->interval > 255)
					return -EINVAL;
				/* NOTE ohci only handles up to 32 */
				max = 128;
			} else {
				if (urb->interval > 1024)
					urb->interval = 1024;
				/* NOTE usb and ohci handle up to 2^15 */
				max = 1024;
			}
			break;
		default:
			return -EINVAL;
		}
		/* Round down to a power of 2, no more than max */
		urb->interval = min(max, 1 << ilog2(urb->interval));
	}

	return usb_hcd_submit_urb(urb, mem_flags);
}
EXPORT_SYMBOL_GPL(usb_submit_urb);
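
/*
 * Example (illustrative sketch): filling and submitting a bulk IN urb
 * from process context.  The my_dev, my_complete, buf and len names are
 * hypothetical; endpoint 1 is an assumption:
 *
 *	static void my_complete(struct urb *urb)
 *	{
 *		if (urb->status)	// error, unlink, or device gone
 *			return;
 *		...			// consume urb->actual_length bytes
 *	}
 *
 *	usb_fill_bulk_urb(urb, my_dev, usb_rcvbulkpipe(my_dev, 1),
 *			  buf, len, my_complete, NULL);
 *	retval = usb_submit_urb(urb, GFP_KERNEL);
 *	if (retval)
 *		...			// urb was rejected; it is still ours
 *
 * From a completion handler or other atomic context the same call would
 * use GFP_ATOMIC, per the mem_flags rules above.
 */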

/*-------------------------------------------------------------------*/

/**
 * usb_unlink_urb - abort/cancel a transfer request for an endpoint
 * @urb: pointer to urb describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  URBs complete only once
 * per submission, and may be canceled only once per submission.
 * Successful cancellation means termination of @urb will be expedited
 * and the completion handler will be called with a status code
 * indicating that the request has been canceled (rather than any other
 * code).
 *
 * This request is always asynchronous.  Success is indicated by
 * returning -EINPROGRESS, at which time the URB will probably not yet
 * have been given back to the device driver.  When it is eventually
 * called, the completion function will see @urb->status == -ECONNRESET.
 * Failure is indicated by usb_unlink_urb() returning any other value.
 * Unlinking will fail when @urb is not currently "linked" (i.e., it was
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
 * root hubs but only to endpoint queues for physical USB devices.]
 *
 * Host Controller Drivers (HCDs) place all the URBs for a particular
 * endpoint in a queue.  Normally the queue advances as the controller
 * hardware processes each request.  But when an URB terminates with an
 * error its queue generally stops (see below), at least until that URB's
 * completion routine returns.  It is guaranteed that a stopped queue
 * will not restart until all its unlinked URBs have been fully retired,
 * with their completion routines run, even if that's not until some time
 * after the original completion handler returns.  The same behavior and
 * guarantee apply when an URB terminates because it was unlinked.
 *
 * Bulk and interrupt endpoint queues are guaranteed to stop whenever an
 * URB terminates with any sort of error, including -ECONNRESET, -ENOENT,
 * and -EREMOTEIO.  Control endpoint queues behave the same way except
 * that they are not guaranteed to stop for -EREMOTEIO errors.  Queues
 * for isochronous endpoints are treated differently, because they must
 * advance at fixed rates.  Such queues do not stop when an URB
 * encounters an error or is unlinked.  An unlinked isochronous URB may
 * leave a gap in the stream of packets; it is undefined whether such
 * gaps can be filled in.
 *
 * Note that early termination of an URB because a short packet was
 * received will generate a -EREMOTEIO error if and only if the
 * URB_SHORT_NOT_OK flag is set.  By setting this flag, USB device
 * drivers can build deep queues for large or complex bulk transfers
 * and clean them up reliably after any sort of aborted transfer by
 * unlinking all pending URBs at the first fault.
 *
 * When a control URB terminates with an error other than -EREMOTEIO, it
 * is quite likely that the status stage of the transfer will not take
 * place.
 */
int usb_unlink_urb(struct urb *urb)
{
	if (!urb)
		return -EINVAL;
	if (!urb->dev)
		return -ENODEV;
	if (!urb->ep)
		return -EIDRM;
	return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
EXPORT_SYMBOL_GPL(usb_unlink_urb);
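
/*
 * Example (illustrative sketch): asynchronous cancellation from atomic
 * context, such as a timer routine; cleanup happens later in the
 * completion handler, which sees -ECONNRESET.  The my_timeout name is
 * hypothetical:
 *
 *	static void my_timeout(unsigned long data)
 *	{
 *		struct urb *urb = (struct urb *)data;
 *
 *		usb_unlink_urb(urb);	// returns quickly, likely -EINPROGRESS
 *	}
 */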

/**
 * usb_kill_urb - cancel a transfer request and wait for it to finish
 * @urb: pointer to URB describing a previously submitted request,
 *	may be NULL
 *
 * This routine cancels an in-progress request.  It is guaranteed that
 * upon return all completion handlers will have finished and the URB
 * will be totally idle and available for reuse.  These features make
 * this an ideal way to stop I/O in a disconnect() callback or close()
 * function.  If the request has not already finished or been unlinked
 * the completion handler will see urb->status == -ENOENT.
 *
 * While the routine is running, attempts to resubmit the URB will fail
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
 */
void usb_kill_urb(struct urb *urb)
{
	static DEFINE_MUTEX(reject_mutex);

	might_sleep();
	if (!(urb && urb->dev && urb->ep))
		return;
	mutex_lock(&reject_mutex);
	++urb->reject;
	mutex_unlock(&reject_mutex);

	usb_hcd_unlink_urb(urb, -ENOENT);
	wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

	mutex_lock(&reject_mutex);
	--urb->reject;
	mutex_unlock(&reject_mutex);
}
EXPORT_SYMBOL_GPL(usb_kill_urb);
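
/*
 * Example (illustrative sketch): synchronous cancellation in a
 * disconnect() handler, where sleeping is allowed.  The my_priv layout
 * and its urb pointer are hypothetical:
 *
 *	static void my_disconnect(struct usb_interface *intf)
 *	{
 *		struct my_priv *priv = usb_get_intfdata(intf);
 *
 *		usb_kill_urb(priv->urb);	// sleeps until the urb is idle
 *		usb_free_urb(priv->urb);	// now safe to drop our reference
 *	}
 */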

/**
 * usb_kill_anchored_urbs - cancel transfer requests en masse
 * @anchor: anchor the requests are bound to
 *
 * This allows all outstanding URBs to be killed starting
 * from the back of the queue.
 */
void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
	struct urb *victim;

	spin_lock_irq(&anchor->lock);
	while (!list_empty(&anchor->urb_list)) {
		victim = list_entry(anchor->urb_list.prev, struct urb,
				    anchor_list);
		/* we must make sure the URB isn't freed before we kill it */
		usb_get_urb(victim);
		spin_unlock_irq(&anchor->lock);
		/* this will unanchor the URB */
		usb_kill_urb(victim);
		usb_put_urb(victim);
		spin_lock_irq(&anchor->lock);
	}
	spin_unlock_irq(&anchor->lock);
}
EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs);
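
/*
 * Example (illustrative sketch): a disconnect() handler cancelling every
 * urb that was anchored at submission time (see usb_anchor_urb() above),
 * instead of tracking each urb by hand:
 *
 *	usb_kill_anchored_urbs(&priv->anchor);	// hypothetical anchor field
 *
 * Each victim's completion handler runs before this returns, and the
 * killed urbs are unanchored as a side effect.
 */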

/**
 * usb_wait_anchor_empty_timeout - wait for an anchor to be unused
 * @anchor: the anchor you want to become unused
 * @timeout: how long you are willing to wait in milliseconds
 *
 * Call this if you want to be sure all of an anchor's
 * URBs have finished.
 */
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
				  unsigned int timeout)
{
	return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
				  msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
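
/*
 * Example (illustrative sketch): waiting up to one second for in-flight
 * urbs to finish on their own before forcing the issue; a zero return
 * means the wait timed out:
 *
 *	if (!usb_wait_anchor_empty_timeout(&priv->anchor, 1000))
 *		usb_kill_anchored_urbs(&priv->anchor);
 */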
607