xref: /openbmc/linux/net/rxrpc/proc.c (revision 9b93eb47)
1 /* /proc/net/ support for AF_RXRPC
2  *
3  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <net/sock.h>
14 #include <net/af_rxrpc.h>
15 #include "ar-internal.h"
16 
/*
 * Human-readable names for the connection states, indexed by the
 * RXRPC_CONN_* enum values.  Each string is padded to eight characters
 * so the State column in /proc/net/rxrpc/conns stays aligned.
 */
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CONN_LOCALLY_ABORTED]		= "LocAbort",
};
27 
28 /*
29  * generate a list of extant and dead calls in /proc/net/rxrpc_calls
30  */
/*
 * Begin a walk of /proc/net/rxrpc/calls.
 *
 * Takes the RCU read lock first (->show() uses rcu_dereference() on
 * call->socket) and then the per-netns call_lock guarding rxnet->calls;
 * both are dropped in ->stop().  At position 0 the head of the calls
 * list itself is returned so that ->show() can emit the column header.
 */
static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
	__acquires(rxnet->call_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	rcu_read_lock();
	read_lock(&rxnet->call_lock);
	return seq_list_start_head(&rxnet->calls, *_pos);
}
41 
42 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
43 {
44 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
45 
46 	return seq_list_next(v, &rxnet->calls, pos);
47 }
48 
/*
 * Finish a walk of /proc/net/rxrpc/calls, releasing the locks taken in
 * ->start() in the reverse order of acquisition.
 */
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->call_lock)
	__releases(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->call_lock);
	rcu_read_unlock();
}
58 
/*
 * Emit one line of /proc/net/rxrpc/calls: the column header when @v is
 * the head of the calls list (as returned by ->start() at position 0),
 * otherwise a description of the call that @v links to.
 */
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx;
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned long timeout = 0;
	rxrpc_seq_t tx_hard_ack, rx_hard_ack;
	char lbuff[50], rbuff[50];

	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " UserID           TxSeq    TW RxSeq    RW RxSerial RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	/* call->socket is accessed under the RCU read lock taken in
	 * ->start(); rx->local is fetched with READ_ONCE() as either
	 * pointer may be NULL by the time we look (note placeholders
	 * below for both cases).
	 */
	rx = rcu_dereference(call->socket);
	if (rx) {
		local = READ_ONCE(rx->local);
		if (local)
			sprintf(lbuff, "%pISpc", &local->srx.transport);
		else
			strcpy(lbuff, "no_local");
	} else {
		strcpy(lbuff, "no_socket");
	}

	peer = call->peer;
	if (peer)
		sprintf(rbuff, "%pISpc", &peer->srx.transport);
	else
		strcpy(rbuff, "no_connection");

	/* Preallocated service calls aren't live yet, so show no Rx
	 * timeout for them; otherwise display the jiffies remaining
	 * until expect_rx_by (unsigned, so it wraps if already past).
	 */
	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
		timeout = READ_ONCE(call->expect_rx_by);
		timeout -= jiffies;
	}

	/* Snapshot each hard-ACK point once so that the window widths
	 * printed below (top - hard_ack) are consistent with the
	 * sequence numbers they accompany.
	 */
	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %lx %08x %02x %08x %02x %08x %06lx\n",
		   lbuff,
		   rbuff,
		   call->service_id,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   atomic_read(&call->usage),
		   rxrpc_call_states[call->state],
		   call->abort_code,
		   call->user_call_ID,
		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
		   call->rx_serial,
		   timeout);

	return 0;
}
125 
/* seq_file iterator operations for /proc/net/rxrpc/calls */
const struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};
132 
133 /*
134  * generate a list of extant virtual connections in /proc/net/rxrpc_conns
135  */
/*
 * Begin a walk of /proc/net/rxrpc/conns.
 *
 * Takes the per-netns conn_lock guarding conn_proc_list (dropped in
 * ->stop()).  At position 0 the list head itself is returned so that
 * ->show() can emit the column header.
 */
static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_lock(&rxnet->conn_lock);
	return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
}
144 
145 static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
146 				       loff_t *pos)
147 {
148 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
149 
150 	return seq_list_next(v, &rxnet->conn_proc_list, pos);
151 }
152 
/*
 * Finish a walk of /proc/net/rxrpc/conns, dropping the conn_lock taken
 * in ->start().
 */
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}
160 
/*
 * Emit one line of /proc/net/rxrpc/conns: the column header when @v is
 * the head of the proc list, otherwise a description of the connection
 * that @v links to.
 */
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Use State    Key     "
			 " Serial   ISerial\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	/* Preallocated service connections don't have their local and
	 * peer parameters filled in yet, so print placeholders rather
	 * than dereferencing them.
	 */
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->params.local->srx.transport);

	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
print:
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   atomic_read(&conn->usage),
		   rxrpc_conn_states[conn->state],
		   key_serial(conn->params.key),
		   atomic_read(&conn->serial),
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}
208 
/* seq_file iterator operations for /proc/net/rxrpc/conns */
const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};
215 
216 /*
217  * generate a list of extant virtual peers in /proc/net/rxrpc/peers
218  */
/*
 * Emit one line of /proc/net/rxrpc/peers: the column header when @v is
 * SEQ_START_TOKEN (the peer table is a hash, so there's no list head to
 * reuse as the header marker), otherwise a description of the peer that
 * @v links to.
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use CW  MTU   LastUse          RTT Rc\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	/* LastUse is shown as seconds elapsed since the last transmit. */
	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %12llu %2u\n",
		   lbuff,
		   rbuff,
		   atomic_read(&peer->usage),
		   peer->cong_cwnd,
		   peer->mtu,
		   now - peer->last_tx_at,
		   peer->rtt,
		   peer->rtt_cursor);

	return 0;
}
255 
/*
 * Find the first peer to show for /proc/net/rxrpc/peers, based on the
 * file position.
 *
 * The position encodes the hash bucket number in its upper bits and a
 * one-based index into that bucket's chain in its low 'shift' bits.
 * Position 0 stands for the column header (SEQ_START_TOKEN) and
 * UINT_MAX marks the end of the table.
 *
 * Only the RCU read lock is taken (dropped in ->stop()); the hash
 * chains are walked with the _rcu hlist helpers.
 */
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			/* Index 0 of bucket 0 is the header line; in any
			 * other bucket, slot 0 is unused, so step over it
			 * to the bucket's first real entry.
			 */
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		/* n is one-based; n - 1 is the 0-based chain index. */
		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}
291 
292 static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
293 {
294 	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
295 	unsigned int bucket, n;
296 	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
297 	void *p;
298 
299 	if (*_pos >= UINT_MAX)
300 		return NULL;
301 
302 	bucket = *_pos >> shift;
303 
304 	p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
305 	if (p)
306 		return p;
307 
308 	for (;;) {
309 		bucket++;
310 		n = 1;
311 		*_pos = (bucket << shift) | n;
312 
313 		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
314 			*_pos = UINT_MAX;
315 			return NULL;
316 		}
317 		if (n == 0) {
318 			*_pos += 1;
319 			n++;
320 		}
321 
322 		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
323 		if (p)
324 			return p;
325 	}
326 }
327 
/*
 * Finish a walk of /proc/net/rxrpc/peers, dropping the RCU read lock
 * taken in ->start().
 */
static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
333 
334 
/* seq_file iterator operations for /proc/net/rxrpc/peers */
const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};
341