xref: /openbmc/linux/net/rxrpc/proc.c (revision ff62b8e6)
// SPDX-License-Identifier: GPL-2.0-or-later
/* /proc/net/ support for AF_RXRPC
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};
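
/* Note: the connection state names above are hand-padded to eight characters
 * so that the columns of /proc/net/rxrpc/conns stay aligned when the state is
 * printed with a plain "%s" below.
 */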

/*
 * generate a list of extant and dead calls in /proc/net/rxrpc/calls
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	rcu_read_lock();
	return seq_list_start_head_rcu(&rxnet->calls, *_pos);
}

static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next_rcu(v, &rxnet->calls, pos);
}

static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

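	/* The ->start() op hands back the list head itself on the first
	 * iteration, which is used here to emit the column header line.
	 */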
	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " DebugId  TxSeq    TW RxSeq    RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

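	/* Report the time remaining (in jiffies) until the expect_rx_by
	 * timeout; preallocated service calls are skipped and show zero.
	 */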
	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;
	}

	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %08x %08x %02x %08x %02x %08x %06lx\n",
		   lbuff,
		   rbuff,
		   call->service_id,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   refcount_read(&call->ref),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->debug_id,
		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
		   call->rx_serial,
		   timeout);

	return 0;
}

const struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};
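
/* For reference: this ops table (and the conns/peers/locals tables below) is
 * wired up under /proc/net/rxrpc/ elsewhere, in net/rxrpc/net_ns.c.  A
 * minimal sketch of that registration, assuming the generic proc_create_net()
 * helper, would look like:
 *
 *	proc_create_net("calls", 0444, rxnet->proc_net, &rxrpc_call_seq_ops,
 *			sizeof(struct seq_net_private));
 */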

/*
 * generate a list of extant virtual connections in /proc/net/rxrpc/conns
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}

static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
				       loff_t *pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	return seq_list_next(v, &rxnet->conn_proc_list, pos);
}

static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}

static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Use State    Key     "
			 " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
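
	/* A preallocated service connection has no usable transport addresses
	 * yet, so print placeholders rather than dereferencing them.
	 */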
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);

	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   refcount_read(&conn->ref),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}

const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};

/*
 * generate a list of extant peers in /proc/net/rxrpc/peers
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use  CW   MTU LastUse      RTT      RTO\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %8u %8u\n",
		   lbuff,
		   rbuff,
		   refcount_read(&peer->ref),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->srtt_us >> 3,
		   jiffies_to_usecs(peer->rto_j));

	return 0;
}

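/* The peer table is walked bucket by bucket.  The iterator position packs
 * the hash bucket number into the upper bits of *_pos and a 1-based index
 * within that bucket into the lower bits; position 0 is reserved for the
 * header line and UINT_MAX marks the end of the walk.
 */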
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}

static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	if (*_pos >= UINT_MAX)
		return NULL;

	bucket = *_pos >> shift;

	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
	if (p)
		return p;

	for (;;) {
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;

		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
	}
}

static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};

/*
 * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
 */
static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	char lbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Use Act\n");
		return 0;
	}

	local = hlist_entry(v, struct rxrpc_local, link);

	sprintf(lbuff, "%pISpc", &local->srx.transport);

	seq_printf(seq,
		   "UDP   %-47.47s %3u %3u\n",
		   lbuff,
		   refcount_read(&local->ref),
		   atomic_read(&local->active_users));

	return 0;
}

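/* Local endpoints are kept on a single hlist, so the iterator position is
 * simply a 1-based index into that list, with position 0 reserved for the
 * header line.
 */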
static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int n;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos;
	if (n == 0)
		return SEQ_START_TOKEN;

	return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
}

static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	if (*_pos >= UINT_MAX)
		return NULL;

	return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
}

static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

const struct seq_operations rxrpc_local_seq_ops = {
	.start  = rxrpc_local_seq_start,
	.next   = rxrpc_local_seq_next,
	.stop   = rxrpc_local_seq_stop,
	.show   = rxrpc_local_seq_show,
};