xref: /openbmc/linux/net/netrom/nr_out.c (revision 5731369a)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Darryl Miles G7LED (dlm@g7led.demon.co.uk)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <net/netrom.h>

/*
 *	This is where all NET/ROM frames pass, except for IP-over-NET/ROM which
 *	cannot be fragmented in this manner.
 */
void nr_output(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char transport[NR_TRANSPORT_LEN];
	int err, frontlen, len;

	if (skb->len - NR_TRANSPORT_LEN > NR_MAX_PACKET_SIZE) {
		/* Save a copy of the Transport Header */
		skb_copy_from_linear_data(skb, transport, NR_TRANSPORT_LEN);
		skb_pull(skb, NR_TRANSPORT_LEN);

		frontlen = skb_headroom(skb);

		while (skb->len > 0) {
			if ((skbn = sock_alloc_send_skb(sk, frontlen + NR_MAX_PACKET_SIZE, 0, &err)) == NULL)
				return;

			skb_reserve(skbn, frontlen);

			len = (NR_MAX_PACKET_SIZE > skb->len) ? skb->len : NR_MAX_PACKET_SIZE;

			/* Copy the user data */
			skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
			skb_pull(skb, len);

			/* Duplicate the Transport Header */
			skb_push(skbn, NR_TRANSPORT_LEN);
			skb_copy_to_linear_data(skbn, transport,
						NR_TRANSPORT_LEN);
			if (skb->len > 0)
				skbn->data[4] |= NR_MORE_FLAG;

			skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&sk->sk_write_queue, skb);		/* Throw it on the queue */
	}

	nr_kick(sk);
}
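
/*
 *	Worked example for the fragmentation above (illustrative only,
 *	assuming the usual values NR_TRANSPORT_LEN == 5 and
 *	NR_MAX_PACKET_SIZE == 236): a 605 byte skb (5 byte transport
 *	header plus 600 bytes of data) exceeds the limit, so nr_output()
 *	queues three fragments carrying 236, 236 and 128 data bytes.
 *	Each fragment is prefixed with its own copy of the saved
 *	transport header, and NR_MORE_FLAG is set in the opcode byte
 *	(data[4]) of every fragment except the last.
 */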

/*
 *	This procedure is passed a buffer descriptor for an iframe. It builds
 *	the rest of the control part of the frame and then writes it out.
 */
static void nr_send_iframe(struct sock *sk, struct sk_buff *skb)
{
	struct nr_sock *nr = nr_sk(sk);

	if (skb == NULL)
		return;

	skb->data[2] = nr->vs;
	skb->data[3] = nr->vr;

	if (nr->condition & NR_COND_OWN_RX_BUSY)
		skb->data[4] |= NR_CHOKE_FLAG;

	nr_start_idletimer(sk);

	nr_transmit_buffer(sk, skb);
}
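
/*
 *	The fixed offsets used here and in nr_send_nak_frame() below
 *	follow the NET/ROM transport header layout (NR_TRANSPORT_LEN
 *	bytes; this sketch is based on the NET/ROM protocol description
 *	and is informational rather than authoritative):
 *
 *	  data[0]  circuit index
 *	  data[1]  circuit id
 *	  data[2]  TX sequence number (our V(S))
 *	  data[3]  RX sequence number (our V(R))
 *	  data[4]  opcode and flags (NR_CHOKE_FLAG, NR_NAK_FLAG,
 *	           NR_MORE_FLAG)
 */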
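
/*
 *	Retransmit the oldest unacknowledged iframe: clone the head of
 *	the ack_queue, refresh its sequence fields from the current V(A)
 *	and V(R) (adding the CHOKE flag if our receiver is busy) and
 *	send the copy.
 */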
void nr_send_nak_frame(struct sock *sk)
{
	struct sk_buff *skb, *skbn;
	struct nr_sock *nr = nr_sk(sk);

	if ((skb = skb_peek(&nr->ack_queue)) == NULL)
		return;

	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL)
		return;

	skbn->data[2] = nr->va;
	skbn->data[3] = nr->vr;

	if (nr->condition & NR_COND_OWN_RX_BUSY)
		skbn->data[4] |= NR_CHOKE_FLAG;

	nr_transmit_buffer(sk, skbn);

	nr->condition &= ~NR_COND_ACK_PENDING;
	nr->vl         = nr->vr;

	nr_stop_t1timer(sk);
}
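
/*
 *	Send as much queued data as the flow control window allows,
 *	moving each frame from the write queue to the ack_queue so that
 *	it can be retransmitted until the peer acknowledges it.
 */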
void nr_kick(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);
	struct sk_buff *skb, *skbn;
	unsigned short start, end;

	if (nr->state != NR_STATE_3)
		return;

	if (nr->condition & NR_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

	start = (skb_peek(&nr->ack_queue) == NULL) ? nr->va : nr->vs;
	end   = (nr->va + nr->window) % NR_MODULUS;

	if (start == end)
		return;

	nr->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full (a worked example follows this function).
	 */

	/*
	 * Dequeue the frame and copy it.
	 */
	skb = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		/*
		 * Transmit the frame copy.
		 */
		nr_send_iframe(sk, skbn);

		nr->vs = (nr->vs + 1) % NR_MODULUS;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&nr->ack_queue, skb);

	} while (nr->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	nr->vl         = nr->vr;
	nr->condition &= ~NR_COND_ACK_PENDING;

	if (!nr_t1timer_running(sk))
		nr_start_t1timer(sk);
}
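
/*
 *	Worked example for the window arithmetic above (illustrative
 *	only, assuming NR_MODULUS == 256): with va == 254, window == 4
 *	and an empty ack_queue, start == 254 and end == (254 + 4) % 256
 *	== 2, so at most four iframes, numbered 254, 255, 0 and 1, are
 *	cloned and sent before the window is considered full.
 */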
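
/*
 *	Prepend the NET/ROM network header (source and destination
 *	callsigns plus the TTL) and hand the frame to the routing layer,
 *	disconnecting the socket if no route is found.
 */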
void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb)
{
	struct nr_sock *nr = nr_sk(sk);
	unsigned char *dptr;

	/*
	 *	Add the protocol byte and network header (the resulting
	 *	layout is summarised after this function).
	 */
	dptr = skb_push(skb, NR_NETWORK_LEN);

	memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] &= ~AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	memcpy(dptr, &nr->dest_addr, AX25_ADDR_LEN);
	dptr[6] &= ~AX25_CBIT;
	dptr[6] |= AX25_EBIT;
	dptr[6] |= AX25_SSSID_SPARE;
	dptr += AX25_ADDR_LEN;

	*dptr++ = READ_ONCE(sysctl_netrom_network_ttl_initialiser);

	if (!nr_route_frame(skb, NULL)) {
		kfree_skb(skb);
		nr_disconnect(sk, ENETUNREACH);
	}
}
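
/*
 *	Network header as built above (NR_NETWORK_LEN bytes, assuming
 *	AX25_ADDR_LEN == 7):
 *
 *	  bytes  0..6   source callsign (C and E bits clear)
 *	  bytes  7..13  destination callsign (E bit set to terminate
 *	                the address field)
 *	  byte  14      time-to-live, initialised from
 *	                sysctl_netrom_network_ttl_initialiser
 */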

/*
 * The following routines are taken from page 170 of the 7th ARRL Computer
 * Networking Conference paper, as is the whole state machine.
 */
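
/*
 *	Start a connection attempt: clear the error conditions and the
 *	retry counter, send a CONNECT REQUEST and leave only the T1
 *	timer running.
 */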
void nr_establish_data_link(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);

	nr->condition = 0x00;
	nr->n2count   = 0;

	nr_write_internal(sk, NR_CONNREQ);

	nr_stop_t2timer(sk);
	nr_stop_t4timer(sk);
	nr_stop_idletimer(sk);
	nr_start_t1timer(sk);
}

/*
 * Never send a NAK when we are CHOKEd.
 */
void nr_enquiry_response(struct sock *sk)
{
	struct nr_sock *nr = nr_sk(sk);
	int frametype = NR_INFOACK;

	if (nr->condition & NR_COND_OWN_RX_BUSY) {
		frametype |= NR_CHOKE_FLAG;
	} else {
		if (skb_peek(&nr->reseq_queue) != NULL)
			frametype |= NR_NAK_FLAG;
	}

	nr_write_internal(sk, frametype);

	nr->vl         = nr->vr;
	nr->condition &= ~NR_COND_ACK_PENDING;
}
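
/*
 *	Summary of the response selected above:
 *
 *	  own RX busy   reseq_queue   frametype sent
 *	  yes           (any)         NR_INFOACK | NR_CHOKE_FLAG
 *	  no            non-empty     NR_INFOACK | NR_NAK_FLAG
 *	  no            empty         NR_INFOACK
 */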
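
/*
 *	Handle an acknowledgement: if it covers everything we have sent
 *	(nr == V(S)), stop T1 and reset the retry counter; otherwise, if
 *	it acknowledges something new (nr != V(A)), release the acked
 *	frames and restart T1.
 */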
void nr_check_iframes_acked(struct sock *sk, unsigned short nr)
{
	struct nr_sock *nrom = nr_sk(sk);

	if (nrom->vs == nr) {
		nr_frames_acked(sk, nr);
		nr_stop_t1timer(sk);
		nrom->n2count = 0;
	} else {
		if (nrom->va != nr) {
			nr_frames_acked(sk, nr);
			nr_start_t1timer(sk);
		}
	}
}