Lines matching refs: q (in every match below, q is the struct tap_queue pointer used throughout the tap driver, drivers/net/tap.c)

35 static inline bool tap_legacy_is_little_endian(struct tap_queue *q) in tap_legacy_is_little_endian() argument
37 return q->flags & TAP_VNET_BE ? false : in tap_legacy_is_little_endian()
41 static long tap_get_vnet_be(struct tap_queue *q, int __user *sp) in tap_get_vnet_be() argument
43 int s = !!(q->flags & TAP_VNET_BE); in tap_get_vnet_be()
51 static long tap_set_vnet_be(struct tap_queue *q, int __user *sp) in tap_set_vnet_be() argument
59 q->flags |= TAP_VNET_BE; in tap_set_vnet_be()
61 q->flags &= ~TAP_VNET_BE; in tap_set_vnet_be()
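The three matches above sit in the CONFIG_TUN_VNET_CROSS_LE branch of the byte-order helpers. A minimal sketch of how those lines likely fit together; the virtio_legacy_is_little_endian() continuation, the put_user()/get_user() handling and the -EFAULT returns are assumptions taken from the sibling tun driver, not visible in the matches:

static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	/* TAP_VNET_BE forces big-endian vnet headers for a legacy guest */
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}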
66 static inline bool tap_legacy_is_little_endian(struct tap_queue *q) in tap_legacy_is_little_endian() argument
71 static long tap_get_vnet_be(struct tap_queue *q, int __user *argp) in tap_get_vnet_be() argument
76 static long tap_set_vnet_be(struct tap_queue *q, int __user *argp) in tap_set_vnet_be() argument
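When CONFIG_TUN_VNET_CROSS_LE is disabled, the same three helpers reduce to stubs. The bodies below are a sketch; the -EINVAL returns are an assumption based on the equivalent tun code:

static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}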
82 static inline bool tap_is_little_endian(struct tap_queue *q) in tap_is_little_endian() argument
84 return q->flags & TAP_VNET_LE || in tap_is_little_endian()
85 tap_legacy_is_little_endian(q); in tap_is_little_endian()
88 static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val) in tap16_to_cpu() argument
90 return __virtio16_to_cpu(tap_is_little_endian(q), val); in tap16_to_cpu()
93 static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val) in cpu_to_tap16() argument
95 return __cpu_to_virtio16(tap_is_little_endian(q), val); in cpu_to_tap16()
147 struct tap_queue *q) in tap_enable_queue() argument
153 if (q->enabled) in tap_enable_queue()
157 rcu_assign_pointer(tap->taps[tap->numvtaps], q); in tap_enable_queue()
158 q->queue_index = tap->numvtaps; in tap_enable_queue()
159 q->enabled = true; in tap_enable_queue()
168 struct tap_queue *q) in tap_set_queue() argument
173 rcu_assign_pointer(q->tap, tap); in tap_set_queue()
174 rcu_assign_pointer(tap->taps[tap->numvtaps], q); in tap_set_queue()
175 sock_hold(&q->sk); in tap_set_queue()
177 q->file = file; in tap_set_queue()
178 q->queue_index = tap->numvtaps; in tap_set_queue()
179 q->enabled = true; in tap_set_queue()
180 file->private_data = q; in tap_set_queue()
181 list_add_tail(&q->next, &tap->queue_list); in tap_set_queue()
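tap_set_queue() attaches a newly opened queue to the tap device under RTNL. A sketch built around the matched lines; the MAX_TAP_QUEUES bound check and the numvtaps/numqueues accounting are assumptions inferred from the rest of the driver:

static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);		/* the tap device holds a sock ref */

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}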
189 static int tap_disable_queue(struct tap_queue *q) in tap_disable_queue() argument
195 if (!q->enabled) in tap_disable_queue()
198 tap = rtnl_dereference(q->tap); in tap_disable_queue()
201 int index = q->queue_index; in tap_disable_queue()
208 q->enabled = false; in tap_disable_queue()
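tap_disable_queue() takes a queue out of the RCU-visible taps[] array by moving the last enabled queue into the vacated slot. A sketch; the nq bookkeeping and ASSERT_RTNL() are assumptions from the surrounding driver:

static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;

		/* Move the last enabled queue into the freed slot. */
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}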
224 static void tap_put_queue(struct tap_queue *q) in tap_put_queue() argument
229 tap = rtnl_dereference(q->tap); in tap_put_queue()
232 if (q->enabled) in tap_put_queue()
233 BUG_ON(tap_disable_queue(q)); in tap_put_queue()
236 RCU_INIT_POINTER(q->tap, NULL); in tap_put_queue()
237 sock_put(&q->sk); in tap_put_queue()
238 list_del_init(&q->next); in tap_put_queue()
244 sock_put(&q->sk); in tap_put_queue()
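tap_put_queue() detaches a queue when its file goes away: it disables the queue if necessary, drops the tap device's sock reference, and drops the queue's own reference only after an RCU grace period. A sketch; the rtnl_lock()/rtnl_unlock() and synchronize_rcu() calls are assumptions not visible in the matches:

static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		sock_put(&q->sk);		/* drop the tap device's ref */
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();			/* wait out RCU readers of q */
	sock_put(&q->sk);			/* drop the queue's own ref */
}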
302 struct tap_queue *q, *tmp; in tap_del_queues() local
305 list_for_each_entry_safe(q, tmp, &tap->queue_list, next) { in tap_del_queues()
306 list_del_init(&q->next); in tap_del_queues()
307 RCU_INIT_POINTER(q->tap, NULL); in tap_del_queues()
308 if (q->enabled) in tap_del_queues()
311 sock_put(&q->sk); in tap_del_queues()
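tap_del_queues() is the device-teardown counterpart: it walks the queue list under RTNL and severs every queue from the dying tap device. A sketch; the final numvtaps assignment, which makes any later tap_set_queue() fail, is an assumption:

static void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* Make any later tap_set_queue() fail. */
	tap->numvtaps = MAX_TAP_QUEUES;
}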
325 struct tap_queue *q; in tap_handle_frame() local
333 q = tap_get_queue(tap, skb); in tap_handle_frame()
334 if (!q) in tap_handle_frame()
343 if (q->flags & IFF_VNET_HDR) in tap_handle_frame()
355 if (ptr_ring_produce(&q->ring, skb)) { in tap_handle_frame()
365 if (ptr_ring_produce(&q->ring, skb)) { in tap_handle_frame()
384 if (ptr_ring_produce(&q->ring, skb)) { in tap_handle_frame()
391 wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND); in tap_handle_frame()
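tap_handle_frame() is the rx_handler that steers frames from the lower device into a queue's ptr_ring and wakes any reader. The matches show three ptr_ring_produce() call sites because the full function has separate GSO, segmented and non-GSO paths; the condensed sketch below collapses them into one. tap_dev_get_rcu(), tap_get_queue() and the count_rx_dropped callback are assumed from the surrounding driver:

/* Condensed sketch: the GSO segmentation path and the checksum
 * fallback for readers without offload support are elided. */
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct tap_dev *tap = tap_dev_get_rcu(skb->dev);
	struct tap_queue *q;

	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);	/* pick a queue for this flow */
	if (!q)
		return RX_HANDLER_PASS;

	skb_push(skb, ETH_HLEN);

	/* ... segment or checksum the skb here if the reader needs it ... */

	if (ptr_ring_produce(&q->ring, skb))
		goto drop;		/* ring full */

	wake_up_interruptible_poll(sk_sleep(&q->sk),
				   EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}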
506 struct tap_queue *q = container_of(sk, struct tap_queue, sk); in tap_sock_destruct() local
508 ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb); in tap_sock_destruct()
515 struct tap_queue *q; in tap_open() local
524 q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL, in tap_open()
526 if (!q) in tap_open()
528 if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) { in tap_open()
529 sk_free(&q->sk); in tap_open()
533 init_waitqueue_head(&q->sock.wq.wait); in tap_open()
534 q->sock.type = SOCK_RAW; in tap_open()
535 q->sock.state = SS_CONNECTED; in tap_open()
536 q->sock.file = file; in tap_open()
537 q->sock.ops = &tap_socket_ops; in tap_open()
538 sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); in tap_open()
539 q->sk.sk_write_space = tap_sock_write_space; in tap_open()
540 q->sk.sk_destruct = tap_sock_destruct; in tap_open()
541 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; in tap_open()
542 q->vnet_hdr_sz = sizeof(struct virtio_net_hdr); in tap_open()
552 sock_set_flag(&q->sk, SOCK_ZEROCOPY); in tap_open()
554 err = tap_set_queue(tap, file, q); in tap_open()
569 sock_put(&q->sk); in tap_open()
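tap_open() allocates the per-file tap_queue, wires up its embedded socket, and attaches it to the device found by char-dev major/minor. A condensed sketch with abbreviated error unwinding; dev_get_by_tap_file(), tap_proto and tap_socket_ops are assumptions not visible in the matches:

static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					 &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data_uid(&q->sock, &q->sk, current_fsuid());
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/* Zerocopy only if the lower device supports SG and high DMA. */
	if ((tap->dev->features & NETIF_F_HIGHDMA) &&
	    (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err)
		sock_put(&q->sk);	/* sk_destruct cleans up the ring */

err:
	if (tap)
		dev_put(tap->dev);
	rtnl_unlock();
	return err;
}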
580 struct tap_queue *q = file->private_data; in tap_release() local
581 tap_put_queue(q); in tap_release()
587 struct tap_queue *q = file->private_data; in tap_poll() local
590 if (!q) in tap_poll()
594 poll_wait(file, &q->sock.wq.wait, wait); in tap_poll()
596 if (!ptr_ring_empty(&q->ring)) in tap_poll()
599 if (sock_writeable(&q->sk) || in tap_poll()
600 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) && in tap_poll()
601 sock_writeable(&q->sk))) in tap_poll()
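tap_poll() reports readability from the ptr_ring and writability from the socket send buffer. A sketch; the EPOLL mask bits set on the two branches are assumptions:

static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->sock.wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}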
637 static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, in tap_get_user() argument
654 if (q->flags & IFF_VNET_HDR) { in tap_get_user()
655 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); in tap_get_user()
667 tap16_to_cpu(q, vnet_hdr.csum_start) + in tap_get_user()
668 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 > in tap_get_user()
669 tap16_to_cpu(q, vnet_hdr.hdr_len)) in tap_get_user()
670 vnet_hdr.hdr_len = cpu_to_tap16(q, in tap_get_user()
671 tap16_to_cpu(q, vnet_hdr.csum_start) + in tap_get_user()
672 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2); in tap_get_user()
674 if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len) in tap_get_user()
682 if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) { in tap_get_user()
686 tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; in tap_get_user()
700 linear = tap16_to_cpu(q, vnet_hdr.hdr_len); in tap_get_user()
707 skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen, in tap_get_user()
727 tap = rcu_dereference(q->tap); in tap_get_user()
737 tap_is_little_endian(q)); in tap_get_user()
769 tap = rcu_dereference(q->tap); in tap_get_user()
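tap_get_user() builds an skb from a userspace write; the matches around file lines 654-674 belong to its IFF_VNET_HDR prologue, which copies in the virtio-net header and clamps hdr_len against csum_start/csum_offset. A sketch of that block only; err, len, from and the copy_from_iter_full() handling are assumptions taken from the surrounding function:

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));

		/* Make sure hdr_len covers the checksummed region. */
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tap16_to_cpu(q, vnet_hdr.csum_start) +
		    tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				tap16_to_cpu(q, vnet_hdr.csum_start) +
				tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);

		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}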
780 struct tap_queue *q = file->private_data; in tap_write_iter() local
786 return tap_get_user(q, NULL, from, noblock); in tap_write_iter()
790 static ssize_t tap_put_user(struct tap_queue *q, in tap_put_user() argument
799 if (q->flags & IFF_VNET_HDR) { in tap_put_user()
803 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); in tap_put_user()
808 tap_is_little_endian(q), true, in tap_put_user()
848 static ssize_t tap_do_read(struct tap_queue *q, in tap_do_read() argument
865 prepare_to_wait(sk_sleep(&q->sk), &wait, in tap_do_read()
869 skb = ptr_ring_consume(&q->ring); in tap_do_read()
884 finish_wait(sk_sleep(&q->sk), &wait); in tap_do_read()
888 ret = tap_put_user(q, skb, to); in tap_do_read()
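tap_do_read() either consumes an skb handed in by the caller (the vhost path passes one via msg_control) or blocks on the queue's wait queue until ptr_ring_consume() returns a frame, then copies it out with tap_put_user(). A sketch; the wait-loop details and the error codes are assumptions:

static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read a frame from the queue. */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep. */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (ret < 0)
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}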
900 struct tap_queue *q = file->private_data; in tap_read_iter() local
907 ret = tap_do_read(q, to, noblock, NULL); in tap_read_iter()
914 static struct tap_dev *tap_get_tap_dev(struct tap_queue *q) in tap_get_tap_dev() argument
919 tap = rtnl_dereference(q->tap); in tap_get_tap_dev()
933 struct tap_queue *q = file->private_data; in tap_ioctl_set_queue() local
937 tap = tap_get_tap_dev(q); in tap_ioctl_set_queue()
942 ret = tap_enable_queue(tap, file, q); in tap_ioctl_set_queue()
944 ret = tap_disable_queue(q); in tap_ioctl_set_queue()
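Queue attach/detach from userspace goes through tap_ioctl_set_queue(), which looks up the device via tap_get_tap_dev() (an RTNL-protected dereference that takes a device reference) and then enables or disables the queue. A sketch; the IFF_ATTACH_QUEUE/IFF_DETACH_QUEUE flag handling and tap_put_tap_dev() are assumptions:

static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}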
952 static int set_offload(struct tap_queue *q, unsigned long arg) in set_offload() argument
958 tap = rtnl_dereference(q->tap); in set_offload()
1011 struct tap_queue *q = file->private_data; in tap_ioctl() local
1032 q->flags = (q->flags & ~TAP_IFFEATURES) | u; in tap_ioctl()
1038 tap = tap_get_tap_dev(q); in tap_ioctl()
1045 u = q->flags; in tap_ioctl()
1072 q->sk.sk_sndbuf = s; in tap_ioctl()
1076 s = q->vnet_hdr_sz; in tap_ioctl()
1087 q->vnet_hdr_sz = s; in tap_ioctl()
1091 s = !!(q->flags & TAP_VNET_LE); in tap_ioctl()
1100 q->flags |= TAP_VNET_LE; in tap_ioctl()
1102 q->flags &= ~TAP_VNET_LE; in tap_ioctl()
1106 return tap_get_vnet_be(q, sp); in tap_ioctl()
1109 return tap_set_vnet_be(q, sp); in tap_ioctl()
1119 ret = set_offload(q, arg); in tap_ioctl()
1125 tap = tap_get_tap_dev(q); in tap_ioctl()
1143 tap = tap_get_tap_dev(q); in tap_ioctl()
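The matches at file lines 1091-1109 are the byte-order cases in tap_ioctl()'s switch. A sketch of just those cases; sp and s are the int __user pointer and scratch int declared earlier in the function, and the TUN* ioctl names are the standard ones shared with the tun driver:

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);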
1170 static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp) in tap_get_user_xdp() argument
1185 if (q->flags & IFF_VNET_HDR) in tap_get_user_xdp()
1186 vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); in tap_get_user_xdp()
1202 err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q)); in tap_get_user_xdp()
1213 tap = rcu_dereference(q->tap); in tap_get_user_xdp()
1229 tap = rcu_dereference(q->tap); in tap_get_user_xdp()
1239 struct tap_queue *q = container_of(sock, struct tap_queue, sock); in tap_sendmsg() local
1248 tap_get_user_xdp(q, xdp); in tap_sendmsg()
1253 return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter, in tap_sendmsg()
1260 struct tap_queue *q = container_of(sock, struct tap_queue, sock); in tap_recvmsg() local
1267 ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb); in tap_recvmsg()
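tap_recvmsg() is the socket-level receive path used by vhost-net; it forwards to tap_do_read() and flags truncation. A sketch; the flags validation and MSG_TRUNC handling are assumptions:

static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}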
1277 struct tap_queue *q = container_of(sock, struct tap_queue, in tap_peek_len() local
1279 return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag); in tap_peek_len()
1295 struct tap_queue *q; in tap_get_socket() local
1298 q = file->private_data; in tap_get_socket()
1299 if (!q) in tap_get_socket()
1301 return &q->sock; in tap_get_socket()
1307 struct tap_queue *q; in tap_get_ptr_ring() local
1311 q = file->private_data; in tap_get_ptr_ring()
1312 if (!q) in tap_get_ptr_ring()
1314 return &q->ring; in tap_get_ptr_ring()
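tap_get_socket() and tap_get_ptr_ring() are the exported accessors vhost-net uses to reach the queue behind a tap file descriptor. A sketch; the f_op identity check against tap_fops and the ERR_PTR error values are assumptions:

struct socket *tap_get_socket(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(tap_get_socket);

struct ptr_ring *tap_get_ptr_ring(struct file *file)
{
	struct tap_queue *q;

	if (file->f_op != &tap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->ring;
}
EXPORT_SYMBOL_GPL(tap_get_ptr_ring);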
1321 struct tap_queue *q; in tap_queue_resize() local
1330 list_for_each_entry(q, &tap->queue_list, next) in tap_queue_resize()
1331 rings[i++] = &q->ring; in tap_queue_resize()
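tap_queue_resize() resizes every queue's ptr_ring when the device's tx_queue_len changes, by collecting the rings into a temporary array and resizing them in one batch. A sketch, assuming ptr_ring_resize_multiple() with the skb destructor used elsewhere in the driver (newer kernels use a _bh variant):

static int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}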