xref: /openbmc/linux/arch/um/drivers/port_kern.c (revision b6dcefde)
/*
 * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{linux.intel,addtoit}.com)
 * Licensed under the GPL
 */

#include "linux/completion.h"
#include "linux/interrupt.h"
#include "linux/list.h"
#include "linux/mutex.h"
#include "linux/workqueue.h"
#include "asm/atomic.h"
#include "init.h"
#include "irq_kern.h"
#include "os.h"
#include "port.h"

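/*
 * Each host port being listened on has one struct port_list, which
 * tracks the listening fd and the lists of pending and established
 * connections.  A struct port_dev is handed to each console attached
 * to the port, and every incoming connection gets a struct connection.
 */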
struct port_list {
	struct list_head list;
	atomic_t wait_count;
	int has_connection;
	struct completion done;
	int port;
	int fd;
	spinlock_t lock;
	struct list_head pending;
	struct list_head connections;
};

struct port_dev {
	struct port_list *port;
	int helper_pid;
	int telnetd_pid;
};

struct connection {
	struct list_head list;
	int fd;
	int helper_pid;
	int socket[2];
	int telnetd_pid;
	struct port_list *port;
};

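/*
 * IRQ handler for the socket shared with the connection's helper.  It
 * receives the connection's final file descriptor, moves the connection
 * from the pending list to the established list, and completes
 * port->done so that port_wait() can pick it up.  Failures (other than
 * -EAGAIN) are passed along as a negative fd and discarded in
 * port_wait().
 */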
static irqreturn_t pipe_interrupt(int irq, void *data)
{
	struct connection *conn = data;
	int fd;

	fd = os_rcv_fd(conn->socket[0], &conn->helper_pid);
	if (fd < 0) {
		if (fd == -EAGAIN)
			return IRQ_NONE;

		printk(KERN_ERR "pipe_interrupt : os_rcv_fd returned %d\n",
		       -fd);
		os_close_file(conn->fd);
	}

	list_del(&conn->list);

	conn->fd = fd;
	list_add(&conn->list, &conn->port->connections);

	complete(&conn->port->done);
	return IRQ_HANDLED;
}

#define NO_WAITER_MSG \
    "****\n" \
    "There are currently no UML consoles waiting for port connections.\n" \
    "Either disconnect from one to make it available or activate some more\n" \
    "by enabling more consoles in the UML /etc/inittab.\n" \
    "****\n"

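/*
 * Accept one connection on the port's listening socket.
 * port_connection() accepts it and starts the helper for the new
 * connection; the resulting struct connection then sits on the pending
 * list until pipe_interrupt() receives its final descriptor.  Returns 1
 * if a connection was handled, 0 otherwise.
 */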
static int port_accept(struct port_list *port)
{
	struct connection *conn;
	int fd, socket[2], pid;

	fd = port_connection(port->fd, socket, &pid);
	if (fd < 0) {
		if (fd != -EAGAIN)
			printk(KERN_ERR "port_accept : port_connection "
			       "returned %d\n", -fd);
		goto out;
	}

	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (conn == NULL) {
		printk(KERN_ERR "port_accept : failed to allocate "
		       "connection\n");
		goto out_close;
	}
	*conn = ((struct connection)
		{ .list		= LIST_HEAD_INIT(conn->list),
		  .fd		= fd,
		  .socket	= { socket[0], socket[1] },
		  .telnetd_pid	= pid,
		  .port		= port });

	if (um_request_irq(TELNETD_IRQ, socket[0], IRQ_READ, pipe_interrupt,
			  IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  "telnetd", conn)) {
		printk(KERN_ERR "port_accept : failed to get IRQ for "
		       "telnetd\n");
		goto out_free;
	}

	if (atomic_read(&port->wait_count) == 0) {
		os_write_file(fd, NO_WAITER_MSG, sizeof(NO_WAITER_MSG));
		printk(KERN_ERR "No one waiting for port\n");
	}
	list_add(&conn->list, &port->pending);
	return 1;

 out_free:
	kfree(conn);
 out_close:
	os_close_file(fd);
	os_kill_process(pid, 1);
 out:
	return 0;
}

static DEFINE_MUTEX(ports_mutex);
static LIST_HEAD(ports);

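/*
 * Deferred from port_interrupt(): with interrupts disabled, walk every
 * port that has signalled activity and accept all of its pending
 * connections.
 */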
static void port_work_proc(struct work_struct *unused)
{
	struct port_list *port;
	struct list_head *ele;
	unsigned long flags;

	local_irq_save(flags);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (!port->has_connection)
			continue;

		reactivate_fd(port->fd, ACCEPT_IRQ);
		while (port_accept(port))
			;
		port->has_connection = 0;
	}
	local_irq_restore(flags);
}

DECLARE_WORK(port_work, port_work_proc);

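/*
 * IRQ handler for the listening socket: just record that the port has a
 * connection waiting and defer the actual accept to the workqueue.
 */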
static irqreturn_t port_interrupt(int irq, void *data)
{
	struct port_list *port = data;

	port->has_connection = 1;
	schedule_work(&port_work);
	return IRQ_HANDLED;
}

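/*
 * Look up the struct port_list for port_num, creating it (binding the
 * listening socket and registering ACCEPT_IRQ) if necessary, and return
 * a new struct port_dev referencing it, or NULL on failure.
 */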
void *port_data(int port_num)
{
	struct list_head *ele;
	struct port_list *port;
	struct port_dev *dev = NULL;
	int fd;

	mutex_lock(&ports_mutex);
	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		if (port->port == port_num)
			goto found;
	}
	port = kmalloc(sizeof(struct port_list), GFP_KERNEL);
	if (port == NULL) {
		printk(KERN_ERR "Allocation of port list failed\n");
		goto out;
	}

	fd = port_listen_fd(port_num);
	if (fd < 0) {
		printk(KERN_ERR "binding to port %d failed, errno = %d\n",
		       port_num, -fd);
		goto out_free;
	}

	if (um_request_irq(ACCEPT_IRQ, fd, IRQ_READ, port_interrupt,
			  IRQF_DISABLED | IRQF_SHARED | IRQF_SAMPLE_RANDOM,
			  "port", port)) {
		printk(KERN_ERR "Failed to get IRQ for port %d\n", port_num);
		goto out_close;
	}

	*port = ((struct port_list)
		{ .list			= LIST_HEAD_INIT(port->list),
		  .wait_count		= ATOMIC_INIT(0),
		  .has_connection	= 0,
		  .port			= port_num,
		  .fd			= fd,
		  .pending		= LIST_HEAD_INIT(port->pending),
		  .connections		= LIST_HEAD_INIT(port->connections) });
	spin_lock_init(&port->lock);
	init_completion(&port->done);
	list_add(&port->list, &ports);

 found:
	dev = kmalloc(sizeof(struct port_dev), GFP_KERNEL);
	if (dev == NULL) {
		printk(KERN_ERR "Allocation of port device entry failed\n");
		goto out;
	}

	*dev = ((struct port_dev) { .port		= port,
				    .helper_pid		= -1,
				    .telnetd_pid	= -1 });
	goto out;

 out_close:
	os_close_file(fd);
 out_free:
	kfree(port);
 out:
	mutex_unlock(&ports_mutex);
	return dev;
}

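/*
 * Sleep until a connection is established on the port and return its
 * file descriptor, or -ERESTARTSYS if interrupted.  Connections that
 * failed in pipe_interrupt() are freed here and skipped.
 */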
int port_wait(void *data)
{
	struct port_dev *dev = data;
	struct connection *conn;
	struct port_list *port = dev->port;
	int fd;

	atomic_inc(&port->wait_count);
	while (1) {
		fd = -ERESTARTSYS;
		if (wait_for_completion_interruptible(&port->done))
			goto out;

		spin_lock(&port->lock);

		conn = list_entry(port->connections.next, struct connection,
				  list);
		list_del(&conn->list);
		spin_unlock(&port->lock);

		os_shutdown_socket(conn->socket[0], 1, 1);
		os_close_file(conn->socket[0]);
		os_shutdown_socket(conn->socket[1], 1, 1);
		os_close_file(conn->socket[1]);

		/*
		 * This is done here because freeing an IRQ can't be done
		 * within the IRQ handler.  So, pipe_interrupt always
		 * completes port->done regardless of whether it got a
		 * successful connection.  Then we loop here throwing out
		 * failed connections until a good one is found.
		 */
		free_irq(TELNETD_IRQ, conn);

		if (conn->fd >= 0)
			break;
		os_close_file(conn->fd);
		kfree(conn);
	}

	fd = conn->fd;
	dev->helper_pid = conn->helper_pid;
	dev->telnetd_pid = conn->telnetd_pid;
	kfree(conn);
 out:
	atomic_dec(&port->wait_count);
	return fd;
}

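/* Kill off the helper and telnetd processes left over from a connection. */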
void port_remove_dev(void *d)
{
	struct port_dev *dev = d;

	if (dev->helper_pid != -1)
		os_kill_process(dev->helper_pid, 0);
	if (dev->telnetd_pid != -1)
		os_kill_process(dev->telnetd_pid, 1);
	dev->helper_pid = -1;
	dev->telnetd_pid = -1;
}

void port_kern_free(void *d)
{
	struct port_dev *dev = d;

	port_remove_dev(dev);
	kfree(dev);
}

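/*
 * Exit hook: free each port's IRQ and close its listening socket when
 * the UML instance shuts down.
 */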
static void free_port(void)
{
	struct list_head *ele;
	struct port_list *port;

	list_for_each(ele, &ports) {
		port = list_entry(ele, struct port_list, list);
		free_irq_by_fd(port->fd);
		os_close_file(port->fd);
	}
}

__uml_exitcall(free_port);