--- af_iucv.c (57f20448032158ad00b1e74f479515c689998be9)
+++ af_iucv.c (f0703c80e5156406ad947cb67fe277725b48080f)
 /*
  * linux/net/iucv/af_iucv.c
  *
  * IUCV protocol stack for Linux on zSeries
  *
  * Copyright 2006 IBM Corporation
  *
  * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
--- 210 unchanged lines hidden (view full) ---
         sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, 1);
         if (!sk)
                 return NULL;

         sock_init_data(sock, sk);
         INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
         spin_lock_init(&iucv_sk(sk)->accept_q_lock);
         skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+        INIT_LIST_HEAD(&iucv_sk(sk)->message_q.list);
+        spin_lock_init(&iucv_sk(sk)->message_q.lock);
         skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
         iucv_sk(sk)->send_tag = 0;

         sk->sk_destruct = iucv_sock_destruct;
         sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
         sk->sk_allocation = GFP_DMA;

         sock_reset_flag(sk, SOCK_ZAPPED);
--- 433 unchanged lines hidden (view full) ---

 fail:
         kfree_skb(skb);
 out:
         release_sock(sk);
         return err;
 }

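The new version defers incoming IUCV messages on a per-socket queue (message_q) when they cannot be turned into skbs right away. The companion change to af_iucv.h is not part of this view, but from the usage in the hunks below (save_msg->path, save_msg->msg, save_msg->list, and the iucv->message_q.list / iucv->message_q.lock accesses) the queued descriptor presumably looks like this sketch:

        struct sock_msg_q {
                struct iucv_path        *path;  /* path the message arrived on */
                struct iucv_message     msg;    /* copy of the message descriptor */
                struct list_head        list;   /* link in iucv_sk(sk)->message_q.list */
                spinlock_t              lock;   /* meaningful only in the queue head */
        };

with struct iucv_sock presumably gaining a struct sock_msg_q message_q member whose list field serves as the queue head and whose lock field serializes access, matching the INIT_LIST_HEAD()/spin_lock_init() calls added above.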
+static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
+{
+        int dataleft, size, copied = 0;
+        struct sk_buff *nskb;
+
+        dataleft = len;
+        while (dataleft) {
+                if (dataleft >= sk->sk_rcvbuf / 4)
+                        size = sk->sk_rcvbuf / 4;
+                else
+                        size = dataleft;
+
+                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
+                if (!nskb)
+                        return -ENOMEM;
+
+                memcpy(nskb->data, skb->data + copied, size);
+                copied += size;
+                dataleft -= size;
+
+                skb_reset_transport_header(nskb);
+                skb_reset_network_header(nskb);
+                nskb->len = size;
+
+                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb);
+        }
+
+        return 0;
+}
+
+static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
+                                 struct iucv_path *path,
+                                 struct iucv_message *msg)
+{
+        int rc;
+
+        if (msg->flags & IPRMDATA) {
+                skb->data = NULL;
+                skb->len = 0;
+        } else {
+                rc = iucv_message_receive(path, msg, 0, skb->data,
+                                          msg->length, NULL);
+                if (rc) {
+                        kfree_skb(skb);
+                        return;
+                }
+                if (skb->truesize >= sk->sk_rcvbuf / 4) {
+                        rc = iucv_fragment_skb(sk, skb, msg->length);
+                        kfree_skb(skb);
+                        skb = NULL;
+                        if (rc) {
+                                iucv_path_sever(path, NULL);
+                                return;
+                        }
+                        skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                } else {
+                        skb_reset_transport_header(skb);
+                        skb_reset_network_header(skb);
+                        skb->len = msg->length;
+                }
+        }
+
+        if (sock_queue_rcv_skb(sk, skb))
+                skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
+}
+
+static void iucv_process_message_q(struct sock *sk)
+{
+        struct iucv_sock *iucv = iucv_sk(sk);
+        struct sk_buff *skb;
+        struct sock_msg_q *p, *n;
+
+        list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
+                skb = alloc_skb(p->msg.length, GFP_ATOMIC | GFP_DMA);
+                if (!skb)
+                        break;
+                iucv_process_message(sk, skb, p->path, &p->msg);
+                list_del(&p->list);
+                kfree(p);
+                if (!skb_queue_empty(&iucv->backlog_skb_q))
+                        break;
+        }
+}
+
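The three helpers added above move all receive-side processing behind a single entry point: iucv_process_message() pulls a message into an skb (fragmenting it into sk_rcvbuf/4-sized chunks on the backlog queue when it is too large), and iucv_process_message_q() later replays deferred messages in arrival order, stopping as soon as an skb cannot be allocated or the backlog starts filling again. Because the kernel context is hard to experiment with, here is a minimal, self-contained userspace sketch of the same save-then-drain pattern; all names are hypothetical, and plain malloc plus a singly linked list stand in for sk_buffs, IUCV paths, and the message queue:

        #include <stdio.h>
        #include <stdlib.h>

        /* Hypothetical stand-ins for struct iucv_message / struct sock_msg_q. */
        struct msg_desc {
                size_t len;
                char payload[64];
                struct msg_desc *next;
        };

        struct rx_state {
                size_t rmem_alloc;                /* bytes queued for the reader */
                size_t rcvbuf;                    /* receive limit (sk->sk_rcvbuf) */
                struct msg_desc *q_head, *q_tail; /* deferred messages (message_q) */
        };

        /* Immediate delivery; 0 on success (models sock_queue_rcv_skb()). */
        static int deliver(struct rx_state *rx, const struct msg_desc *m)
        {
                if (rx->rmem_alloc + m->len > rx->rcvbuf)
                        return -1;
                rx->rmem_alloc += m->len;
                printf("delivered %zu bytes: %s\n", m->len, m->payload);
                return 0;
        }

        /* Models iucv_callback_rx(): deliver now, or park on the list. */
        static void on_receive(struct rx_state *rx, const char *data, size_t len)
        {
                struct msg_desc m = { .len = len };
                struct msg_desc *save;

                snprintf(m.payload, sizeof(m.payload), "%s", data);

                /* Keep ordering: if anything is already deferred, defer this too. */
                if (!rx->q_head && deliver(rx, &m) == 0)
                        return;

                save = calloc(1, sizeof(*save)); /* models the save_message path */
                if (!save)
                        return;
                *save = m;
                save->next = NULL;
                if (rx->q_tail)
                        rx->q_tail->next = save;
                else
                        rx->q_head = save;
                rx->q_tail = save;
        }

        /* Models iucv_process_message_q(): drain deferrals in arrival order. */
        static void drain(struct rx_state *rx)
        {
                struct msg_desc *m;

                while ((m = rx->q_head)) {
                        if (deliver(rx, m))
                                break;          /* still no room, stop early */
                        rx->q_head = m->next;
                        if (!rx->q_head)
                                rx->q_tail = NULL;
                        free(m);
                }
        }

        int main(void)
        {
                struct rx_state rx = { .rcvbuf = 32 };

                on_receive(&rx, "first", 20);   /* fits, delivered immediately */
                on_receive(&rx, "second", 20);  /* would overflow, deferred */
                rx.rmem_alloc = 0;              /* reader consumed its data */
                drain(&rx);                     /* deferred message goes out now */
                return 0;
        }

Running it prints one immediate delivery and then one deferred delivery once drain() is called, mirroring how iucv_sock_recvmsg() below invokes iucv_process_message_q() only after the skb backlog has emptied.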
|
 static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
                              struct msghdr *msg, size_t len, int flags)
 {
         int noblock = flags & MSG_DONTWAIT;
         struct sock *sk = sock->sk;
         struct iucv_sock *iucv = iucv_sk(sk);
         int target, copied = 0;
         struct sk_buff *skb, *rskb, *cskb;
         int err = 0;

         if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) &&
             skb_queue_empty(&iucv->backlog_skb_q) &&
-            skb_queue_empty(&sk->sk_receive_queue))
+            skb_queue_empty(&sk->sk_receive_queue) &&
+            list_empty(&iucv->message_q.list))
                 return 0;

         if (flags & (MSG_OOB))
                 return -EOPNOTSUPP;

         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

         skb = skb_recv_datagram(sk, flags, noblock, &err);
--- 22 unchanged lines hidden (view full) ---
                 if (skb->len) {
                         skb_queue_head(&sk->sk_receive_queue, skb);
                         goto done;
                 }

                 kfree_skb(skb);

                 /* Queue backlog skbs */
-                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                rskb = skb_dequeue(&iucv->backlog_skb_q);
                 while (rskb) {
                         if (sock_queue_rcv_skb(sk, rskb)) {
-                                skb_queue_head(&iucv_sk(sk)->backlog_skb_q,
+                                skb_queue_head(&iucv->backlog_skb_q,
                                                rskb);
                                 break;
                         } else {
-                                rskb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q);
+                                rskb = skb_dequeue(&iucv->backlog_skb_q);
                         }
                 }
+                if (skb_queue_empty(&iucv->backlog_skb_q)) {
+                        spin_lock_bh(&iucv->message_q.lock);
+                        if (!list_empty(&iucv->message_q.list))
+                                iucv_process_message_q(sk);
+                        spin_unlock_bh(&iucv->message_q.lock);
+                }
+
|
         } else
                 skb_queue_head(&sk->sk_receive_queue, skb);

 done:
         return err ? : copied;
 }

 static inline unsigned int iucv_accept_poll(struct sock *parent)
--- 225 unchanged lines hidden (view full) ---
 static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
 {
         struct sock *sk = path->private;

         sk->sk_state = IUCV_CONNECTED;
         sk->sk_state_change(sk);
 }

-static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len,
-                             struct sk_buff_head *fragmented_skb_q)
-{
-        int dataleft, size, copied = 0;
-        struct sk_buff *nskb;
-
-        dataleft = len;
-        while (dataleft) {
-                if (dataleft >= sk->sk_rcvbuf / 4)
-                        size = sk->sk_rcvbuf / 4;
-                else
-                        size = dataleft;
-
-                nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA);
-                if (!nskb)
-                        return -ENOMEM;
-
-                memcpy(nskb->data, skb->data + copied, size);
-                copied += size;
-                dataleft -= size;
-
-                skb_reset_transport_header(nskb);
-                skb_reset_network_header(nskb);
-                nskb->len = size;
-
-                skb_queue_tail(fragmented_skb_q, nskb);
-        }
-
-        return 0;
-}
-
 static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
 {
         struct sock *sk = path->private;
         struct iucv_sock *iucv = iucv_sk(sk);
-        struct sk_buff *skb, *fskb;
-        struct sk_buff_head fragmented_skb_q;
-        int rc;
+        struct sk_buff *skb;
+        struct sock_msg_q *save_msg;
+        int len;

-        skb_queue_head_init(&fragmented_skb_q);
-
         if (sk->sk_shutdown & RCV_SHUTDOWN)
                 return;

+        if (!list_empty(&iucv->message_q.list) ||
+            !skb_queue_empty(&iucv->backlog_skb_q))
+                goto save_message;
+
+        len = atomic_read(&sk->sk_rmem_alloc);
+        len += msg->length + sizeof(struct sk_buff);
+        if (len > sk->sk_rcvbuf)
+                goto save_message;
+
         skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
-        if (!skb) {
-                iucv_path_sever(path, NULL);
-                return;
-        }
+        if (!skb)
+                goto save_message;

-        if (msg->flags & IPRMDATA) {
-                skb->data = NULL;
-                skb->len = 0;
-        } else {
-                rc = iucv_message_receive(path, msg, 0, skb->data,
-                                          msg->length, NULL);
-                if (rc) {
-                        kfree_skb(skb);
-                        return;
-                }
-                if (skb->truesize >= sk->sk_rcvbuf / 4) {
-                        rc = iucv_fragment_skb(sk, skb, msg->length,
-                                               &fragmented_skb_q);
-                        kfree_skb(skb);
-                        skb = NULL;
-                        if (rc) {
-                                iucv_path_sever(path, NULL);
-                                return;
-                        }
-                } else {
-                        skb_reset_transport_header(skb);
-                        skb_reset_network_header(skb);
-                        skb->len = msg->length;
-                }
-        }
-        /* Queue the fragmented skb */
-        fskb = skb_dequeue(&fragmented_skb_q);
-        while (fskb) {
-                if (!skb_queue_empty(&iucv->backlog_skb_q))
-                        skb_queue_tail(&iucv->backlog_skb_q, fskb);
-                else if (sock_queue_rcv_skb(sk, fskb))
-                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, fskb);
-                fskb = skb_dequeue(&fragmented_skb_q);
-        }
+        spin_lock(&iucv->message_q.lock);
+        iucv_process_message(sk, skb, path, msg);
+        spin_unlock(&iucv->message_q.lock);

-        /* Queue the original skb if it exists (was not fragmented) */
-        if (skb) {
-                if (!skb_queue_empty(&iucv->backlog_skb_q))
-                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
-                else if (sock_queue_rcv_skb(sk, skb))
-                        skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
-        }
+        return;

+save_message:
+        save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA);
+        save_msg->path = path;
+        save_msg->msg = *msg;
+
+        spin_lock(&iucv->message_q.lock);
+        list_add_tail(&save_msg->list, &iucv->message_q.list);
+        spin_unlock(&iucv->message_q.lock);
|
 }

 static void iucv_callback_txdone(struct iucv_path *path,
                                  struct iucv_message *msg)
 {
         struct sock *sk = path->private;
         struct sk_buff *this;
         struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
--- 117 unchanged lines hidden ---
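The net effect is that iucv_callback_rx() now applies flow control before any data is copied: if messages are already deferred or backlogged, or if the projected receive-memory use (current sk->sk_rmem_alloc, plus msg->length, plus sizeof(struct sk_buff)) would exceed sk->sk_rcvbuf, only the small message descriptor is saved and the data itself is left pending on the IUCV path until the reader catches up. A toy illustration of that admission check, with assumed numbers (the actual sizeof(struct sk_buff) depends on kernel version and config):

        #include <stdio.h>

        int main(void)
        {
                unsigned int sk_rcvbuf = 65536;     /* assumed receive limit */
                unsigned int sk_rmem_alloc = 61000; /* assumed bytes already queued */
                unsigned int msg_length = 8192;     /* assumed incoming message size */
                unsigned int skb_overhead = 232;    /* assumed sizeof(struct sk_buff) */

                unsigned int len = sk_rmem_alloc + msg_length + skb_overhead;

                printf("projected %u vs limit %u -> %s\n", len, sk_rcvbuf,
                       len > sk_rcvbuf ? "save_message" : "receive now");
                return 0;
        }

Note also the locking split: the callback takes plain spin_lock(&iucv->message_q.lock) since IUCV path events are delivered from tasklet (softirq) context, while iucv_sock_recvmsg() runs in process context and therefore uses spin_lock_bh() so the receive callback cannot deadlock against the drain.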