// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
 * Copyright (C) 2015-2016 Samsung Electronics
 *               Igor Kotrasinski <i.kotrasinsk@samsung.com>
 */

#include <net/sock.h>
#include <linux/list.h>
#include <linux/kthread.h>

#include "usbip_common.h"
#include "vudc.h"

static inline void setup_base_pdu(struct usbip_header_basic *base,
				  __u32 command, __u32 seqnum)
{
	base->command = command;
	base->seqnum = seqnum;
	base->devid = 0;
	base->ep = 0;
	base->direction = 0;
}

static void setup_ret_submit_pdu(struct usbip_header *rpdu, struct urbp *urb_p)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_SUBMIT, urb_p->seqnum);
	usbip_pack_pdu(rpdu, urb_p->urb, USBIP_RET_SUBMIT, 1);
}

static void setup_ret_unlink_pdu(struct usbip_header *rpdu,
				 struct v_unlink *unlink)
{
	setup_base_pdu(&rpdu->base, USBIP_RET_UNLINK, unlink->seqnum);
	rpdu->u.ret_unlink.status = unlink->status;
}

static int v_send_ret_unlink(struct vudc *udc, struct v_unlink *unlink)
{
	struct msghdr msg;
	struct kvec iov[1];
	size_t txsize;

	int ret;
	struct usbip_header pdu_header;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));
	memset(&iov, 0, sizeof(iov));

	/* 1. setup usbip_header */
	setup_ret_unlink_pdu(&pdu_header, unlink);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[0].iov_base = &pdu_header;
	iov[0].iov_len = sizeof(pdu_header);
	txsize += sizeof(pdu_header);

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg, iov,
			     1, txsize);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			return -EPIPE;
		return ret;
	}
	kfree(unlink);

	return txsize;
}

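/*
 * Overview of the RET_SUBMIT reply that v_send_ret_submit() below assembles
 * into a single kernel_sendmsg() call (layout inferred from the code, not
 * quoted from the USB/IP protocol documentation):
 *
 *   iov[0]    struct usbip_header          always sent
 *   iov[1..]  urb->transfer_buffer         for non-ISO IN transfers with
 *                                          actual_length > 0, or one entry
 *                                          per iso_frame_desc[] for ISO IN
 *   iov[n]    usbip_iso_packet_descriptor  appended for ISO transfers
 *
 * txsize accumulates the total length so that a short write from
 * kernel_sendmsg() can be treated as a fatal TCP error.
 */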
static int v_send_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct urb *urb = urb_p->urb;
	struct usbip_header pdu_header;
	struct usbip_iso_packet_descriptor *iso_buffer = NULL;
	struct kvec *iov = NULL;
	int iovnum = 0;
	int ret = 0;
	size_t txsize;
	struct msghdr msg;

	txsize = 0;
	memset(&pdu_header, 0, sizeof(pdu_header));
	memset(&msg, 0, sizeof(msg));

	if (urb_p->type == USB_ENDPOINT_XFER_ISOC)
		iovnum = 2 + urb->number_of_packets;
	else
		iovnum = 2;

	iov = kcalloc(iovnum, sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
		ret = -ENOMEM;
		goto out;
	}
	iovnum = 0;

	/* 1. setup usbip_header */
	setup_ret_submit_pdu(&pdu_header, urb_p);
	usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
			  pdu_header.base.seqnum, urb);
	usbip_header_correct_endian(&pdu_header, 1);

	iov[iovnum].iov_base = &pdu_header;
	iov[iovnum].iov_len = sizeof(pdu_header);
	iovnum++;
	txsize += sizeof(pdu_header);

	/* 2. setup transfer buffer */
	if (urb_p->type != USB_ENDPOINT_XFER_ISOC &&
	    usb_pipein(urb->pipe) && urb->actual_length > 0) {
		iov[iovnum].iov_base = urb->transfer_buffer;
		iov[iovnum].iov_len = urb->actual_length;
		iovnum++;
		txsize += urb->actual_length;
	} else if (urb_p->type == USB_ENDPOINT_XFER_ISOC &&
		   usb_pipein(urb->pipe)) {
		/* FIXME - copypasted from stub_tx, refactor */
		int i;

		for (i = 0; i < urb->number_of_packets; i++) {
			iov[iovnum].iov_base = urb->transfer_buffer +
				urb->iso_frame_desc[i].offset;
			iov[iovnum].iov_len =
				urb->iso_frame_desc[i].actual_length;
			iovnum++;
			txsize += urb->iso_frame_desc[i].actual_length;
		}

		if (txsize != sizeof(pdu_header) + urb->actual_length) {
			usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
			ret = -EPIPE;
			goto out;
		}
	}
	/* else - no buffer to send */

	/* 3. setup iso_packet_descriptor */
	if (urb_p->type == USB_ENDPOINT_XFER_ISOC) {
		ssize_t len = 0;

		iso_buffer = usbip_alloc_iso_desc_pdu(urb, &len);
		if (!iso_buffer) {
			usbip_event_add(&udc->ud,
					VUDC_EVENT_ERROR_MALLOC);
			ret = -ENOMEM;
			goto out;
		}

		iov[iovnum].iov_base = iso_buffer;
		iov[iovnum].iov_len = len;
		txsize += len;
		iovnum++;
	}

	ret = kernel_sendmsg(udc->ud.tcp_socket, &msg,
			     iov, iovnum, txsize);
	if (ret != txsize) {
		usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
		if (ret >= 0)
			ret = -EPIPE;
		goto out;
	}

out:
	kfree(iov);
	kfree(iso_buffer);
	free_urbp_and_urb(urb_p);
	if (ret < 0)
		return ret;
	return txsize;
}

static int v_send_ret(struct vudc *udc)
{
	unsigned long flags;
	struct tx_item *txi;
	size_t total_size = 0;
	int ret = 0;

	spin_lock_irqsave(&udc->lock_tx, flags);
	while (!list_empty(&udc->tx_queue)) {
		txi = list_first_entry(&udc->tx_queue, struct tx_item,
				       tx_entry);
		list_del(&txi->tx_entry);
		spin_unlock_irqrestore(&udc->lock_tx, flags);

		switch (txi->type) {
		case TX_SUBMIT:
			ret = v_send_ret_submit(udc, txi->s);
			break;
		case TX_UNLINK:
			ret = v_send_ret_unlink(udc, txi->u);
			break;
		}
		kfree(txi);

		if (ret < 0)
			return ret;

		total_size += ret;

		spin_lock_irqsave(&udc->lock_tx, flags);
	}

	spin_unlock_irqrestore(&udc->lock_tx, flags);
	return total_size;
}

int v_tx_loop(void *data)
{
	struct usbip_device *ud = (struct usbip_device *) data;
	struct vudc *udc = container_of(ud, struct vudc, ud);
	int ret;

	while (!kthread_should_stop()) {
		if (usbip_event_happened(&udc->ud))
			break;
		ret = v_send_ret(udc);
		if (ret < 0) {
			pr_warn("v_tx exit with error %d\n", ret);
			break;
		}
		wait_event_interruptible(udc->tx_waitq,
					 (!list_empty(&udc->tx_queue) ||
					 kthread_should_stop()));
	}

	return 0;
}

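/*
 * The enqueue helpers below feed v_tx_loop() through udc->tx_queue. Each
 * queue node is a struct tx_item (its real definition lives in vudc.h);
 * judging only by how it is used in this file, its shape is roughly:
 *
 *	struct tx_item {
 *		struct list_head tx_entry;
 *		int type;			// TX_SUBMIT or TX_UNLINK
 *		union {
 *			struct urbp *s;		// TX_SUBMIT payload
 *			struct v_unlink *u;	// TX_UNLINK payload
 *		};
 *	};
 *
 * This is a sketch, not the authoritative definition. The helpers only link
 * the item in; the caller already holds the tx lock (see the comments below)
 * and is presumably responsible for waking udc->tx_waitq so the loop above
 * picks up the new work.
 */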
/* called with spinlocks held */
void v_enqueue_ret_unlink(struct vudc *udc, __u32 seqnum, __u32 status)
{
	struct tx_item *txi;
	struct v_unlink *unlink;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}
	unlink = kzalloc(sizeof(*unlink), GFP_ATOMIC);
	if (!unlink) {
		kfree(txi);
		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	unlink->seqnum = seqnum;
	unlink->status = status;
	txi->type = TX_UNLINK;
	txi->u = unlink;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}

/* called with spinlocks held */
void v_enqueue_ret_submit(struct vudc *udc, struct urbp *urb_p)
{
	struct tx_item *txi;

	txi = kzalloc(sizeof(*txi), GFP_ATOMIC);
	if (!txi) {
		usbip_event_add(&udc->ud, VDEV_EVENT_ERROR_MALLOC);
		return;
	}

	txi->type = TX_SUBMIT;
	txi->s = urb_p;

	list_add_tail(&txi->tx_entry, &udc->tx_queue);
}