/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
 * USA.
 */

#include <linux/kthread.h>
#include <linux/socket.h>

#include "usbip_common.h"
#include "stub.h"

static void stub_free_priv_and_urb(struct stub_priv *priv)
{
	struct urb *urb = priv->urb;

	kfree(urb->setup_packet);
	urb->setup_packet = NULL;

	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;

	list_del(&priv->list);
	kmem_cache_free(stub_priv_cache, priv);
	usb_free_urb(urb);
}

/* must be called with sdev->priv_lock held */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
			     __u32 status)
{
	struct stub_unlink *unlink;

	unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
	if (!unlink) {
		usbip_event_add(&sdev->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;

	list_add_tail(&unlink->list, &sdev->unlink_tx);
}

/**
 * stub_complete - completion handler of a usbip urb
 * @urb: pointer to the urb completed
 *
 * When a urb has completed, the USB core driver calls this function,
 * mostly in interrupt context. To return the result of the urb, the
 * completed urb is linked to the pending tx queue (priv_tx).
 */
void stub_complete(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	unsigned long flags;

	usbip_dbg_stub_tx("complete! status %d\n", urb->status);

	switch (urb->status) {
	case 0:
		/* OK */
		break;
	case -ENOENT:
		dev_info(&urb->dev->dev,
			 "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
		return;
	case -ECONNRESET:
		dev_info(&urb->dev->dev,
			 "unlinked by a call to usb_unlink_urb()\n");
		break;
	case -EPIPE:
		dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
			 usb_pipeendpoint(urb->pipe));
		break;
	case -ESHUTDOWN:
		dev_info(&urb->dev->dev, "device removed?\n");
		break;
	default:
		dev_info(&urb->dev->dev,
			 "urb completion with non-zero status %d\n",
			 urb->status);
		break;
	}

	/* link a urb to the queue of tx. */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	if (sdev->ud.tcp_socket == NULL) {
		usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
		/* It will be freed in stub_device_cleanup_urbs(). */
	} else if (priv->unlinking) {
		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
		stub_free_priv_and_urb(priv);
	} else {
		list_move_tail(&priv->list, &sdev->priv_tx);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	/* wake up tx_thread */
	wake_up(&sdev->tx_waitq);
}
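
/*
 * Lifecycle of a stub_priv across the stub queues, for reference (queue
 * names are from struct stub_device in stub.h; this is an illustrative
 * sketch, not additional driver logic):
 *
 *	stub_rx: CMD_SUBMIT     ->  priv queued on sdev->priv_init
 *	stub_complete()         ->  priv moved to sdev->priv_tx
 *	stub_send_ret_submit()  ->  priv moved to sdev->priv_free, freed
 *
 * A urb being unlinked takes a shortcut: stub_complete() enqueues a
 * stub_unlink on sdev->unlink_tx and frees the priv immediately.
 */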

static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command	= command;
	base->seqnum	= seqnum;
	base->devid	= 0;
	base->ep	= 0;
	base->direction	= 0;
}

static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;

	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
	usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}

static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct stub_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
		list_move_tail(&priv->list, &sdev->priv_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return priv;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

static int stub_send_ret_submit(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	struct msghdr msg;
	size_t txsize;

	size_t total_size = 0;

	while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
		int ret;
		struct urb *urb = priv->urb;
		struct usbip_header pdu_header;
		struct usbip_iso_packet_descriptor *iso_buffer = NULL;
		struct kvec *iov = NULL;
		int iovnum = 0;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));

		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			iovnum = 2 + urb->number_of_packets;
		else
			iovnum = 2;

		iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);
		if (!iov) {
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
			return -1;
		}

		iovnum = 0;

		/* 1. setup usbip_header */
		setup_ret_submit_pdu(&pdu_header, urb);
		usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
				  pdu_header.base.seqnum, urb);
		usbip_header_correct_endian(&pdu_header, 1);

		iov[iovnum].iov_base = &pdu_header;
		iov[iovnum].iov_len  = sizeof(pdu_header);
		iovnum++;
		txsize += sizeof(pdu_header);

		/* 2. setup transfer buffer */
		if (usb_pipein(urb->pipe) &&
		    usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
		    urb->actual_length > 0) {
			iov[iovnum].iov_base = urb->transfer_buffer;
			iov[iovnum].iov_len  = urb->actual_length;
			iovnum++;
			txsize += urb->actual_length;
		} else if (usb_pipein(urb->pipe) &&
			   usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			/*
			 * For isochronous packets: actual length is the sum
			 * of the actual length of the individual packets,
			 * but as the packet offsets are not changed there
			 * will be padding between the packets. To optimally
			 * use the bandwidth the padding is not transmitted.
			 */

			int i;

			for (i = 0; i < urb->number_of_packets; i++) {
				iov[iovnum].iov_base = urb->transfer_buffer +
					urb->iso_frame_desc[i].offset;
				iov[iovnum].iov_len =
					urb->iso_frame_desc[i].actual_length;
				iovnum++;
				txsize += urb->iso_frame_desc[i].actual_length;
			}

			if (txsize != sizeof(pdu_header) + urb->actual_length) {
				dev_err(&sdev->udev->dev,
					"actual length of urb %d does not match iso packet sizes %zu\n",
					urb->actual_length,
					txsize - sizeof(pdu_header));
				kfree(iov);
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_TCP);
				return -1;
			}
		}

		/* 3. setup iso_packet_descriptor */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			ssize_t len = 0;

			iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
			if (!iso_buffer) {
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_MALLOC);
				kfree(iov);
				return -1;
			}

			iov[iovnum].iov_base = iso_buffer;
			iov[iovnum].iov_len  = len;
			txsize += len;
			iovnum++;
		}

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
				     iov, iovnum, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			kfree(iov);
			kfree(iso_buffer);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		kfree(iov);
		kfree(iso_buffer);

		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
		stub_free_priv_and_urb(priv);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}
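
/*
 * Wire layout assembled by stub_send_ret_submit() above, shown as a
 * sketch (field definitions live in usbip_common.h; the header and the
 * iso descriptors are converted to big-endian before sending, while the
 * transfer data is sent as-is):
 *
 *	iov[0]            iov[1..n]                   iov[last]
 *	+---------------+---------------------------+-------------------+
 *	| usbip_header  | transfer data (IN only;   | iso packet        |
 *	| RET_SUBMIT    | one entry per iso packet) | descriptors (iso) |
 *	+---------------+---------------------------+-------------------+
 *
 * kernel_sendmsg() pushes all entries as one PDU, so txsize must equal
 * the sum of all iov_len values.
 */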

static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
		list_move_tail(&unlink->list, &sdev->unlink_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return unlink;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

static int stub_send_ret_unlink(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	struct msghdr msg;
	struct kvec iov[1];
	size_t txsize;

	size_t total_size = 0;

	while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
		int ret;
		struct usbip_header pdu_header;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(&iov, 0, sizeof(iov));

		usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);

		/* 1. setup usbip_header */
		setup_ret_unlink_pdu(&pdu_header, unlink);
		usbip_header_correct_endian(&pdu_header, 1);

		iov[0].iov_base = &pdu_header;
		iov[0].iov_len	= sizeof(pdu_header);
		txsize += sizeof(pdu_header);

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
				     1, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed!, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		usbip_dbg_stub_tx("send txdata\n");
		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
		list_del(&unlink->list);
		kfree(unlink);
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}
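
/*
 * An example CMD_UNLINK/RET_UNLINK exchange, as a sketch (the seqnum
 * values are illustrative; the field layout is defined in
 * usbip_common.h):
 *
 *	client -> stub  : CMD_UNLINK, seqnum 42, targeting seqnum 7
 *	stub  -> client : RET_UNLINK, seqnum 42, status -ECONNRESET
 *
 * A status of -ECONNRESET mirrors usb_unlink_urb() and tells the client
 * the urb was unlinked in time; if the urb had already completed, the
 * client receives a normal RET_SUBMIT for seqnum 7 instead (see the
 * comment in stub_tx_loop() below).
 */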

int stub_tx_loop(void *data)
{
	struct usbip_device *ud = data;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		/*
		 * send_ret_submit comes earlier than send_ret_unlink. stub_rx
		 * looks at only the priv_init queue. If the completion of a
		 * URB is earlier than the receipt of CMD_UNLINK, the priv is
		 * moved to the priv_tx queue and stub_rx does not find the
		 * target priv. In this case, vhci_rx receives the result of
		 * the submit request first and then the result of the unlink
		 * request. The result of the submit is given back to the
		 * usbcore as the completion of the unlink request, and the
		 * unlink request itself is ignored. This is ok because a
		 * driver that calls usb_unlink_urb() learns that the unlink
		 * came too late from the status of the given-back URB, which
		 * carries the result of the original submit.
		 */
		if (stub_send_ret_submit(sdev) < 0)
			break;

		if (stub_send_ret_unlink(sdev) < 0)
			break;

		wait_event_interruptible(sdev->tx_waitq,
					 (!list_empty(&sdev->priv_tx) ||
					  !list_empty(&sdev->unlink_tx) ||
					  kthread_should_stop()));
	}

	return 0;
}
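
/*
 * For reference, a sketch of how this loop is typically started when a
 * client attaches (the actual call site is in the stub driver's attach
 * path, not in this file):
 *
 *	sdev->ud.tcp_tx = kthread_run(stub_tx_loop, &sdev->ud, "stub_tx");
 *
 * The thread then sleeps on sdev->tx_waitq and is woken from
 * stub_complete() each time a completed urb or an unlink result is
 * queued for transmission.
 */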