// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2003-2008 Takahiro Hirofuchi
 */

#include <linux/kthread.h>
#include <linux/socket.h>

#include "usbip_common.h"
#include "stub.h"

static void stub_free_priv_and_urb(struct stub_priv *priv)
{
	struct urb *urb = priv->urb;

	kfree(urb->setup_packet);
	urb->setup_packet = NULL;

	kfree(urb->transfer_buffer);
	urb->transfer_buffer = NULL;

	list_del(&priv->list);
	kmem_cache_free(stub_priv_cache, priv);
	usb_free_urb(urb);
}

/* caller must hold sdev->priv_lock */
void stub_enqueue_ret_unlink(struct stub_device *sdev, __u32 seqnum,
			     __u32 status)
{
	struct stub_unlink *unlink;

	unlink = kzalloc(sizeof(struct stub_unlink), GFP_ATOMIC);
	if (!unlink) {
		usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;

	list_add_tail(&unlink->list, &sdev->unlink_tx);
}

/**
 * stub_complete - completion handler of a usbip urb
 * @urb: pointer to the urb completed
 *
 * When a urb has completed, the USB core driver calls this function, usually
 * in interrupt context. To return the result of the urb, the completed urb
 * is queued on the list of replies pending transmission.
 */
void stub_complete(struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;
	struct stub_device *sdev = priv->sdev;
	unsigned long flags;

	usbip_dbg_stub_tx("complete! status %d\n", urb->status);

	switch (urb->status) {
	case 0:
		/* OK */
		break;
	case -ENOENT:
		dev_info(&urb->dev->dev,
			 "stopped by a call to usb_kill_urb() because of cleaning up a virtual connection\n");
		return;
	case -ECONNRESET:
		dev_info(&urb->dev->dev,
			 "unlinked by a call to usb_unlink_urb()\n");
		break;
	case -EPIPE:
		dev_info(&urb->dev->dev, "endpoint %d is stalled\n",
			 usb_pipeendpoint(urb->pipe));
		break;
	case -ESHUTDOWN:
		dev_info(&urb->dev->dev, "device removed?\n");
		break;
	default:
		dev_info(&urb->dev->dev,
			 "urb completion with non-zero status %d\n",
			 urb->status);
		break;
	}

	/* link a urb to the queue of tx. */
	spin_lock_irqsave(&sdev->priv_lock, flags);
	if (sdev->ud.tcp_socket == NULL) {
		usbip_dbg_stub_tx("ignore urb for closed connection\n");
		/* It will be freed in stub_device_cleanup_urbs(). */
	} else if (priv->unlinking) {
		stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
		stub_free_priv_and_urb(priv);
	} else {
		list_move_tail(&priv->list, &sdev->priv_tx);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	/* wake up tx_thread */
	wake_up(&sdev->tx_waitq);
}

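/*
 * Fill in the fields shared by all reply PDUs. A reply is matched to its
 * command by seqnum alone, so devid, ep and direction carry no information
 * here and are cleared.
 */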
static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command = command;
	base->seqnum = seqnum;
	base->devid = 0;
	base->ep = 0;
	base->direction = 0;
}

static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urb *urb)
{
	struct stub_priv *priv = (struct stub_priv *) urb->context;

	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, priv->seqnum);
	usbip_pack_pdu(rpdu, urb, USBIP_RET_SUBMIT, 1);
}

static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct stub_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

static struct stub_priv *dequeue_from_priv_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	/* pop the first entry, parking it on priv_free for release after send */
	list_for_each_entry_safe(priv, tmp, &sdev->priv_tx, list) {
		list_move_tail(&priv->list, &sdev->priv_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return priv;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

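/*
 * Drain the priv_tx queue: for every completed URB, build one
 * USBIP_RET_SUBMIT frame and send it back to the importer. A frame is the
 * usbip_header, followed by the received data for an IN transfer (for
 * isochronous pipes, only the used part of each packet, without the
 * inter-packet padding), followed by the iso_packet_descriptor array for
 * isochronous pipes. The pieces are gathered into a kvec array and pushed
 * out with a single kernel_sendmsg() call.
 */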
static int stub_send_ret_submit(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_priv *priv, *tmp;

	struct msghdr msg;
	size_t txsize;

	size_t total_size = 0;

	while ((priv = dequeue_from_priv_tx(sdev)) != NULL) {
		int ret;
		struct urb *urb = priv->urb;
		struct usbip_header pdu_header;
		struct usbip_iso_packet_descriptor *iso_buffer = NULL;
		struct kvec *iov = NULL;
		int iovnum = 0;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));

		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
			iovnum = 2 + urb->number_of_packets;
		else
			iovnum = 2;

		iov = kcalloc(iovnum, sizeof(struct kvec), GFP_KERNEL);
		if (!iov) {
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_MALLOC);
			return -1;
		}

		/* reuse iovnum as the fill index into iov[] */
		iovnum = 0;

		/* 1. setup usbip_header */
		setup_ret_submit_pdu(&pdu_header, urb);
		usbip_dbg_stub_tx("setup txdata seqnum: %u\n",
				  pdu_header.base.seqnum);
		usbip_header_correct_endian(&pdu_header, 1);

		iov[iovnum].iov_base = &pdu_header;
		iov[iovnum].iov_len = sizeof(pdu_header);
		iovnum++;
		txsize += sizeof(pdu_header);

		/* 2. setup transfer buffer */
		if (usb_pipein(urb->pipe) &&
		    usb_pipetype(urb->pipe) != PIPE_ISOCHRONOUS &&
		    urb->actual_length > 0) {
			iov[iovnum].iov_base = urb->transfer_buffer;
			iov[iovnum].iov_len = urb->actual_length;
			iovnum++;
			txsize += urb->actual_length;
		} else if (usb_pipein(urb->pipe) &&
			   usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			/*
			 * For isochronous packets: actual length is the sum
			 * of the actual length of the individual packets,
			 * but as the packet offsets are not changed there
			 * will be padding between the packets. To optimally
			 * use the bandwidth the padding is not transmitted.
			 */

			int i;

			for (i = 0; i < urb->number_of_packets; i++) {
				iov[iovnum].iov_base = urb->transfer_buffer +
					urb->iso_frame_desc[i].offset;
				iov[iovnum].iov_len =
					urb->iso_frame_desc[i].actual_length;
				iovnum++;
				txsize += urb->iso_frame_desc[i].actual_length;
			}

			if (txsize != sizeof(pdu_header) + urb->actual_length) {
				dev_err(&sdev->udev->dev,
					"actual length of urb %d does not match iso packet sizes %zu\n",
					urb->actual_length,
					txsize - sizeof(pdu_header));
				kfree(iov);
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_TCP);
				return -1;
			}
		}

		/* 3. setup iso_packet_descriptor */
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			ssize_t len = 0;

			iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
			if (!iso_buffer) {
				usbip_event_add(&sdev->ud,
						SDEV_EVENT_ERROR_MALLOC);
				kfree(iov);
				return -1;
			}

			iov[iovnum].iov_base = iso_buffer;
			iov[iovnum].iov_len = len;
			txsize += len;
			iovnum++;
		}

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg,
				     iov, iovnum, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed, retval %d for %zd\n",
				ret, txsize);
			kfree(iov);
			kfree(iso_buffer);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		kfree(iov);
		kfree(iso_buffer);

		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);
	list_for_each_entry_safe(priv, tmp, &sdev->priv_free, list) {
		stub_free_priv_and_urb(priv);
	}
	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}

static struct stub_unlink *dequeue_from_unlink_tx(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_tx, list) {
		list_move_tail(&unlink->list, &sdev->unlink_free);
		spin_unlock_irqrestore(&sdev->priv_lock, flags);
		return unlink;
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return NULL;
}

static int stub_send_ret_unlink(struct stub_device *sdev)
{
	unsigned long flags;
	struct stub_unlink *unlink, *tmp;

	struct msghdr msg;
	struct kvec iov[1];
	size_t txsize;

	size_t total_size = 0;

	while ((unlink = dequeue_from_unlink_tx(sdev)) != NULL) {
		int ret;
		struct usbip_header pdu_header;

		txsize = 0;
		memset(&pdu_header, 0, sizeof(pdu_header));
		memset(&msg, 0, sizeof(msg));
		memset(&iov, 0, sizeof(iov));

		usbip_dbg_stub_tx("setup ret unlink %lu\n", unlink->seqnum);

		/* 1. setup usbip_header */
		setup_ret_unlink_pdu(&pdu_header, unlink);
		usbip_header_correct_endian(&pdu_header, 1);

		iov[0].iov_base = &pdu_header;
		iov[0].iov_len = sizeof(pdu_header);
		txsize += sizeof(pdu_header);

		ret = kernel_sendmsg(sdev->ud.tcp_socket, &msg, iov,
				     1, txsize);
		if (ret != txsize) {
			dev_err(&sdev->udev->dev,
				"sendmsg failed, retval %d for %zd\n",
				ret, txsize);
			usbip_event_add(&sdev->ud, SDEV_EVENT_ERROR_TCP);
			return -1;
		}

		usbip_dbg_stub_tx("send txdata\n");
		total_size += txsize;
	}

	spin_lock_irqsave(&sdev->priv_lock, flags);

	list_for_each_entry_safe(unlink, tmp, &sdev->unlink_free, list) {
		list_del(&unlink->list);
		kfree(unlink);
	}

	spin_unlock_irqrestore(&sdev->priv_lock, flags);

	return total_size;
}

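/*
 * Main loop of the stub_tx kthread: flush the submit replies, then the
 * unlink replies, and sleep on tx_waitq until stub_complete() (or the
 * unlink path in stub_rx) queues more work, an event occurs, or the
 * thread is asked to stop.
 */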
int stub_tx_loop(void *data)
{
	struct usbip_device *ud = data;
	struct stub_device *sdev = container_of(ud, struct stub_device, ud);

	while (!kthread_should_stop()) {
		if (usbip_event_happened(ud))
			break;

		/*
		 * stub_send_ret_submit() must come before
		 * stub_send_ret_unlink(): stub_rx looks only at the
		 * priv_init queue, so if a URB completes before the
		 * matching CMD_UNLINK arrives, the priv has already moved
		 * to the priv_tx queue and stub_rx cannot find it. In that
		 * case vhci_rx receives the result of the submit request
		 * first and the result of the unlink request afterwards.
		 * The submit result is given back to usbcore as the
		 * completion of the unlink request, and the unlink result
		 * itself is ignored. This is fine because a driver that
		 * calls usb_unlink_urb() can tell that the unlink came too
		 * late from the status of the returned URB, which still
		 * carries the completion status of the original submission.
		 */
		if (stub_send_ret_submit(sdev) < 0)
			break;

		if (stub_send_ret_unlink(sdev) < 0)
			break;

		wait_event_interruptible(sdev->tx_waitq,
					 (!list_empty(&sdev->priv_tx) ||
					  !list_empty(&sdev->unlink_tx) ||
					  kthread_should_stop()));
	}

	return 0;
}
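
/*
 * Illustration only, not part of this file: stub_tx_loop() is intended to
 * run as a per-device kthread. Assuming the kthread_get_run() and
 * kthread_stop_put() helpers from usbip_common.h, creation would look
 * roughly like
 *
 *	sdev->ud.tcp_tx = kthread_get_run(stub_tx_loop, &sdev->ud, "stub_tx");
 *
 * with a matching kthread_stop_put(ud->tcp_tx) when the connection is shut
 * down, which makes kthread_should_stop() above return true and ends the
 * loop.
 */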