// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 */
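
/*
 * Usage sketch (illustrative, not part of this file): a userspace process
 * creates a KCM socket, loads a BPF program that returns the length of each
 * message, and attaches a connected TCP socket to the multiplexor. Names
 * follow uapi/linux/kcm.h; tcpfd and bpffd are assumed to already exist and
 * error handling is omitted for brevity.
 *
 *	#include <linux/kcm.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *
 *	int kcmfd = socket(AF_KCM, SOCK_DGRAM, KCMPROTO_CONNECTED);
 *	struct kcm_attach attach = {
 *		.fd = tcpfd,		// established TCP socket
 *		.bpf_fd = bpffd,	// BPF_PROG_TYPE_SOCKET_FILTER prog
 *	};
 *	ioctl(kcmfd, SIOCKCMATTACH, &attach);
 */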

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>

#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>
#include <trace/events/sock.h>

unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;

static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
	return (struct kcm_sock *)sk;
}

static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
	return (struct kcm_tx_msg *)skb->cb;
}

static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = EPIPE;
	sk_error_report(csk);
}

static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}

/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.msgs;
	psock->saved_rx_bytes = psock->strp.stats.bytes;
}

static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* KCM is ready to receive messages on its queue: either the KCM is new or
 * has become unblocked after being blocked on a full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;
		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, true);
}

static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}

/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);

			/* Commit rx_wait to read in kcm_rfree */
			smp_wmb();

			goto try_again;
		}
	}
}

/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
				       struct sk_buff *head)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	WARN_ON(psock->ready_rx_msg);

	if (psock->rx_kcm)
		return psock->rx_kcm;

	spin_lock_bh(&mux->rx_lock);

	if (psock->rx_kcm) {
		spin_unlock_bh(&mux->rx_lock);
		return psock->rx_kcm;
	}

	kcm_update_rx_mux_stats(mux, psock);

	if (list_empty(&mux->kcm_rx_waiters)) {
		psock->ready_rx_msg = head;
		strp_pause(&psock->strp);
		list_add_tail(&psock->psock_ready_list,
			      &mux->psocks_ready);
		spin_unlock_bh(&mux->rx_lock);
		return NULL;
	}

	kcm = list_first_entry(&mux->kcm_rx_waiters,
			       struct kcm_sock, wait_rx_list);
	list_del(&kcm->wait_rx_list);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, false);

	psock->rx_kcm = kcm;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, psock);

	spin_unlock_bh(&mux->rx_lock);

	return kcm;
}

static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
	kcm_done(container_of(w, struct kcm_sock, done_work));
}

/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, NULL);

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}
	spin_unlock_bh(&mux->rx_lock);
}

/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}

/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		/* Unable to reserve a KCM, message is held in psock and strp
		 * is paused.
		 */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}

static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct bpf_prog *prog = psock->bpf_prog;
	int res;

	res = bpf_prog_run_pin_on_cpu(prog, skb);
	return res;
}
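
/* The attached BPF program implements the framing: given an skb positioned
 * at the start of a candidate message, it returns the total message length.
 * A minimal sketch of such a program, illustrative only and assuming a
 * protocol with a 2-byte big-endian length header that counts just the
 * payload (compiled separately with LLVM and loaded as
 * BPF_PROG_TYPE_SOCKET_FILTER, in the style of the example in
 * Documentation/networking/kcm.rst):
 *
 *	int bpf_prog1(struct __sk_buff *skb)
 *	{
 *		// total message length = payload length + 2-byte header
 *		return load_half(skb, 0) + 2;
 *	}
 */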

static int kcm_read_sock_done(struct strparser *strp, int err)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

	unreserve_rx_kcm(psock, true);

	return err;
}

static void psock_state_change(struct sock *sk)
{
	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
	 * since the application will normally not poll with EPOLLIN
	 * on the TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}

static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the socket is reserved; if so, someone is waiting to send on it. */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void unreserve_psock(struct kcm_sock *kcm);

/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if a psock was reserved for this
	 * kcm via psock_now_avail.
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}

/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	if (list_empty(&mux->kcm_tx_waiters)) {
		list_add_tail(&psock->psock_avail_list,
			      &mux->psocks_avail);
	} else {
		kcm = list_first_entry(&mux->kcm_tx_waiters,
				       struct kcm_sock,
				       wait_psock_list);
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
		psock->tx_kcm = kcm;

		/* Commit before changing tx_psock since that is read in
		 * reserve_psock before queuing work.
		 */
		smp_mb();

		kcm->tx_psock = psock;
		KCM_STATS_INCR(psock->stats.reserved);
		queue_work(kcm_wq, &kcm->tx_work);
	}
}

/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);

	psock = kcm->tx_psock;

	if (WARN_ON(!psock)) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	smp_rmb(); /* Read tx_psock before tx_wait */

	kcm_update_tx_mux_stats(mux, psock);

	WARN_ON(kcm->tx_wait);

	kcm->tx_psock = NULL;
	psock->tx_kcm = NULL;
	KCM_STATS_INCR(psock->stats.unreserved);

	if (unlikely(psock->tx_stopped)) {
		if (psock->done) {
			/* Deferred free */
			list_del(&psock->psock_list);
			mux->psocks_cnt--;
			sock_put(psock->sk);
			fput(psock->sk->sk_socket->file);
			kmem_cache_free(kcm_psockp, psock);
		}

		/* Don't put back on available list */

		spin_unlock_bh(&mux->lock);

		return;
	}

	psock_now_avail(psock);

	spin_unlock_bh(&mux->lock);
}

static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);
	KCM_STATS_INCR(mux->stats.tx_retries);
	spin_unlock_bh(&mux->lock);
}

/* Write any messages ready on the kcm socket. Called with kcm sock lock
 * held. Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
	unsigned int total_sent = 0;
	struct sock *sk = &kcm->sk;
	struct kcm_psock *psock;
	struct sk_buff *head;
	int ret = 0;

	kcm->tx_wait_more = false;
	psock = kcm->tx_psock;
	if (unlikely(psock && psock->tx_stopped)) {
		/* A reserved psock was aborted asynchronously. Unreserve
		 * it and we'll retry the message.
		 */
		unreserve_psock(kcm);
		kcm_report_tx_retry(kcm);
		if (skb_queue_empty(&sk->sk_write_queue))
			return 0;

		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->started_tx = false;
	}

retry:
	while ((head = skb_peek(&sk->sk_write_queue))) {
		struct msghdr msg = {
			.msg_flags = MSG_DONTWAIT | MSG_SPLICE_PAGES,
		};
		struct kcm_tx_msg *txm = kcm_tx_msg(head);
		struct sk_buff *skb;
		unsigned int msize;
		int i;

		if (!txm->started_tx) {
			psock = reserve_psock(kcm);
			if (!psock)
				goto out;
			skb = head;
			txm->frag_offset = 0;
			txm->sent = 0;
			txm->started_tx = true;
		} else {
			if (WARN_ON(!psock)) {
				ret = -EINVAL;
				goto out;
			}
			skb = txm->frag_skb;
		}

		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
			ret = -EINVAL;
			goto out;
		}

		msize = 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);

		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE,
			      skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags,
			      msize);
		iov_iter_advance(&msg.msg_iter, txm->frag_offset);

		do {
			ret = sock_sendmsg(psock->sk->sk_socket, &msg);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					/* Save state to try again when there's
					 * write space on the socket
					 */
					txm->frag_skb = skb;
					ret = 0;
					goto out;
				}

				/* Hard failure in sending message, abort this
				 * psock since it has lost framing
				 * synchronization and retry sending the
				 * message from the beginning.
				 */
				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
						   true);
				unreserve_psock(kcm);
				psock = NULL;

				txm->started_tx = false;
				kcm_report_tx_retry(kcm);
				ret = 0;
				goto retry;
			}

			txm->sent += ret;
			txm->frag_offset += ret;
			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
		} while (msg.msg_iter.count > 0);

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				txm->frag_skb = skb_shinfo(skb)->frag_list;
				txm->frag_offset = 0;
				continue;
			}
		} else if (skb->next) {
			txm->frag_skb = skb->next;
			txm->frag_offset = 0;
			continue;
		}

		/* Successfully sent the whole packet, account for it. */
		sk->sk_wmem_queued -= txm->sent;
		total_sent += txm->sent;
		skb_dequeue(&sk->sk_write_queue);
		kfree_skb(head);
		KCM_STATS_INCR(psock->stats.tx_msgs);
	}
out:
	if (!head) {
		/* Done with all queued messages. */
		WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
		if (psock)
			unreserve_psock(kcm);
	}

	/* Check if write space is available */
	sk->sk_write_space(sk);

	return total_sent ? : ret;
}

static void kcm_tx_work(struct work_struct *w)
{
	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
	struct sock *sk = &kcm->sk;
	int err;

	lock_sock(sk);

	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
	 * aborts
	 */
	err = kcm_write_msgs(kcm);
	if (err < 0) {
		/* Hard failure in write, report error on KCM socket */
		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
		report_csk_error(&kcm->sk, -err);
		goto out;
	}

	/* Primarily for SOCK_SEQPACKET sockets */
	if (likely(sk->sk_socket) &&
	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_space(sk);
	}

out:
	release_sock(sk);
}

static void kcm_push(struct kcm_sock *kcm)
{
	if (kcm->tx_wait_more)
		kcm_write_msgs(kcm);
}

static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	size_t copy, copied = 0;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	int eor = (sock->type == SOCK_DGRAM) ?
		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
	int err = -EPIPE;

	lock_sock(sk);

	/* Per tcp_sendmsg this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		goto start;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	if (!sk_stream_memory_free(sk)) {
		kcm_push(kcm);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (msg_data_left(msg)) {
		/* New message, alloc head skb */
		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;

		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

start:
	while (msg_data_left(msg)) {
		bool merge = true;
		int i = skb_shinfo(skb)->nr_frags;
		struct page_frag *pfrag = sk_page_frag(sk);

		if (!sk_page_frag_refill(sk, pfrag))
			goto wait_for_memory;

		if (!skb_can_coalesce(skb, i, pfrag->page,
				      pfrag->offset)) {
			if (i == MAX_SKB_FRAGS) {
				struct sk_buff *tskb;

				tskb = alloc_skb(0, sk->sk_allocation);
				if (!tskb)
					goto wait_for_memory;

				if (head == skb)
					skb_shinfo(head)->frag_list = tskb;
				else
					skb->next = tskb;

				skb = tskb;
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				continue;
			}
			merge = false;
		}

		if (msg->msg_flags & MSG_SPLICE_PAGES) {
			copy = msg_data_left(msg);
			if (!sk_wmem_schedule(sk, copy))
				goto wait_for_memory;

			err = skb_splice_from_iter(skb, &msg->msg_iter, copy,
						   sk->sk_allocation);
			if (err < 0) {
				if (err == -EMSGSIZE)
					goto wait_for_memory;
				goto out_error;
			}

			copy = err;
			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
			sk_wmem_queued_add(sk, copy);
			sk_mem_charge(sk, copy);

			if (head != skb)
				head->truesize += copy;
		} else {
			copy = min_t(int, msg_data_left(msg),
				     pfrag->size - pfrag->offset);
			if (!sk_wmem_schedule(sk, copy))
				goto wait_for_memory;

			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
						       pfrag->page,
						       pfrag->offset,
						       copy);
			if (err)
				goto out_error;

			/* Update the skb. */
			if (merge) {
				skb_frag_size_add(
					&skb_shinfo(skb)->frags[i - 1], copy);
			} else {
				skb_fill_page_desc(skb, i, pfrag->page,
						   pfrag->offset, copy);
				get_page(pfrag->page);
			}

			pfrag->offset += copy;
		}

		copied += copy;
		if (head != skb) {
			head->len += copy;
			head->data_len += copy;
		}

		continue;

wait_for_memory:
		kcm_push(kcm);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		if (head) {
			/* Message complete, queue it on send buffer */
			__skb_queue_tail(&sk->sk_write_queue, head);
			kcm->seq_skb = NULL;
			KCM_STATS_INCR(kcm->stats.tx_msgs);
		}

		if (msg->msg_flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
partial_message:
		if (head) {
			kcm->seq_skb = head;
			kcm_tx_msg(head)->last_skb = skb;
		}
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

	release_sock(sk);
	return copied;

out_error:
	kcm_push(kcm);

	if (sock->type == SOCK_SEQPACKET) {
		/* Wrote some bytes before encountering an
		 * error, return partial success.
		 */
		if (copied)
			goto partial_message;
		if (head != kcm->seq_skb)
			kfree_skb(head);
	} else {
		kfree_skb(head);
		kcm->seq_skb = NULL;
	}

	err = sk_stream_error(sk, msg->msg_flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
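
/* From userspace, message boundaries are what sendmsg() delivers: on a
 * SOCK_DGRAM KCM socket each sendmsg() without MSG_MORE is one framed
 * message, and MSG_BATCH defers transmission so several messages can be
 * flushed together. A hedged sketch (msg1/msg2 are hypothetical buffers,
 * error handling omitted):
 *
 *	send(kcmfd, msg1, msg1_len, MSG_BATCH);	// queued, not yet flushed
 *	send(kcmfd, msg2, msg2_len, 0);		// flushes both messages
 */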

static void kcm_splice_eof(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);

	if (skb_queue_empty_lockless(&sk->sk_write_queue))
		return;

	lock_sock(sk);
	kcm_write_msgs(kcm);
	release_sock(sk);
}

static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	struct strp_msg *stm;
	int copied = 0;
	struct sk_buff *skb;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < stm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			stm->offset += copied;
			stm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
		}
	}

out:
	skb_free_datagram(sk, skb);
	return copied ? : err;
}
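
/* On the receive side, recvmsg() returns at most one parsed message. MSG_EOR
 * in the returned msg_flags marks the end of a message; on SOCK_DGRAM a short
 * buffer yields MSG_TRUNC, while SOCK_SEQPACKET lets the remainder be read by
 * subsequent calls. A minimal userspace sketch (the buffer size is an
 * arbitrary assumption):
 *
 *	char buf[65536];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(kcmfd, &mh, 0);
 *	if (n >= 0 && (mh.msg_flags & MSG_EOR))
 *		;	// got one complete message of n bytes
 */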

static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct strp_msg *stm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCK_SEQPACKET */

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	stm->offset += copied;
	stm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	skb_free_datagram(sk, skb);
	return copied;

err_out:
	skb_free_datagram(sk, skb);
	return err;
}

/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);
		}

		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}

/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}

static int kcm_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, valbool;
	int err = 0;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case KCM_RECV_DISABLE:
		lock_sock(&kcm->sk);
		if (valbool)
			kcm_recv_disable(kcm);
		else
			kcm_recv_enable(kcm);
		release_sock(&kcm->sk);
		break;
	default:
		err = -ENOPROTOOPT;
	}

	return err;
}

static int kcm_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, len;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	len = min_t(unsigned int, len, sizeof(int));

	switch (optname) {
	case KCM_RECV_DISABLE:
		val = kcm->rx_disabled;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
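
/* KCM_RECV_DISABLE is the only KCM-level socket option: it parks a KCM
 * socket so no new messages are steered to it, and pending messages are
 * requeued to other sockets on the mux. A hedged userspace sketch:
 *
 *	int on = 1;
 *	setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 */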

static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * EPOLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}

static int kcm_attach(struct socket *sock, struct socket *csock,
		      struct bpf_prog *prog)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct sock *csk;
	struct kcm_psock *psock = NULL, *tpsock;
	struct list_head *head;
	int index = 0;
	static const struct strp_callbacks cb = {
		.rcv_msg = kcm_rcv_strparser,
		.parse_msg = kcm_parse_func_strparser,
		.read_sock_done = kcm_read_sock_done,
	};
	int err = 0;

	csk = csock->sk;
	if (!csk)
		return -EINVAL;

	lock_sock(csk);

	/* Only allow TCP sockets to be attached for now */
	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
	    csk->sk_protocol != IPPROTO_TCP) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Don't allow listeners or closed sockets */
	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
		err = -EOPNOTSUPP;
		goto out;
	}

	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
	if (!psock) {
		err = -ENOMEM;
		goto out;
	}

	psock->mux = mux;
	psock->sk = csk;
	psock->bpf_prog = prog;

	write_lock_bh(&csk->sk_callback_lock);

	/* Check if sk_user_data is already used by KCM or someone else.
	 * Must be done under lock to prevent race conditions.
	 */
	if (csk->sk_user_data) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		err = -EALREADY;
		goto out;
	}

	err = strp_init(&psock->strp, csk, &cb);
	if (err) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		goto out;
	}

	psock->save_data_ready = csk->sk_data_ready;
	psock->save_write_space = csk->sk_write_space;
	psock->save_state_change = csk->sk_state_change;
	csk->sk_user_data = psock;
	csk->sk_data_ready = psock_data_ready;
	csk->sk_write_space = psock_write_space;
	csk->sk_state_change = psock_state_change;

	write_unlock_bh(&csk->sk_callback_lock);

	sock_hold(csk);

	/* Finished initialization, now add the psock to the MUX. */
	spin_lock_bh(&mux->lock);
	head = &mux->psocks;
	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
		if (tpsock->index != index)
			break;
		head = &tpsock->psock_list;
		index++;
	}

	list_add(&psock->psock_list, head);
	psock->index = index;

	KCM_STATS_INCR(mux->stats.psock_attach);
	mux->psocks_cnt++;
	psock_now_avail(psock);
	spin_unlock_bh(&mux->lock);

	/* Schedule RX work in case there are already bytes queued */
	strp_check_rcv(&psock->strp);

out:
	release_sock(csk);

	return err;
}

static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
	struct socket *csock;
	struct bpf_prog *prog;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out;
	}

	err = kcm_attach(sock, csock, prog);
	if (err) {
		bpf_prog_put(prog);
		goto out;
	}

	/* Keep reference on file also */

	return 0;
out:
	sockfd_put(csock);
	return err;
}
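
/* The inverse operation detaches a TCP socket from the mux by fd. A hedged
 * userspace sketch matching struct kcm_unattach in uapi/linux/kcm.h:
 *
 *	struct kcm_unattach unattach = { .fd = tcpfd };
 *	ioctl(kcmfd, SIOCKCMUNATTACH, &unattach);
 */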

static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}

	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);
	lock_sock(csk);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved. Just mark it finished and we will clean
		 * up in the kcm paths; we need the kcm lock, which cannot be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
			/* psock now unreserved in the window the mux was unlocked */
			goto no_reserved;
		}
		psock->done = 1;

		/* Commit done before queuing work to process it */
		smp_mb();

		/* Queue tx work to make sure psock->done is handled */
		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
		spin_unlock_bh(&mux->lock);
	} else {
no_reserved:
		if (!psock->tx_stopped)
			list_del(&psock->psock_avail_list);
		list_del(&psock->psock_list);
		mux->psocks_cnt--;
		spin_unlock_bh(&mux->lock);

		sock_put(csk);
		fput(csk->sk_socket->file);
		kmem_cache_free(kcm_psockp, psock);
	}

	release_sock(csk);
}

static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct socket *csock;
	struct sock *csk;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	csk = csock->sk;
	if (!csk) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOENT;

	spin_lock_bh(&mux->lock);

	list_for_each_entry(psock, &mux->psocks, psock_list) {
		if (psock->sk != csk)
			continue;

		/* Found the matching psock */

		if (psock->unattaching || WARN_ON(psock->done)) {
			err = -EALREADY;
			break;
		}

		psock->unattaching = 1;

		spin_unlock_bh(&mux->lock);

		/* Lower socket lock should already be held */
		kcm_unattach(psock);

		err = 0;
		goto out;
	}

	spin_unlock_bh(&mux->lock);

out:
	sockfd_put(csock);
	return err;
}

static struct proto kcm_proto = {
	.name	= "KCM",
	.owner	= THIS_MODULE,
	.obj_size = sizeof(struct kcm_sock),
};

/* Clone a kcm socket. */
static struct file *kcm_clone(struct socket *osock)
{
	struct socket *newsock;
	struct sock *newsk;

	newsock = sock_alloc();
	if (!newsock)
		return ERR_PTR(-ENFILE);

	newsock->type = osock->type;
	newsock->ops = osock->ops;

	__module_get(newsock->ops->owner);

	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
			 &kcm_proto, false);
	if (!newsk) {
		sock_release(newsock);
		return ERR_PTR(-ENOMEM);
	}
	sock_init_data(newsock, newsk);
	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);

	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
}

static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case SIOCKCMATTACH: {
		struct kcm_attach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_attach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMUNATTACH: {
		struct kcm_unattach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_unattach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMCLONE: {
		struct kcm_clone info;
		struct file *file;

		info.fd = get_unused_fd_flags(0);
		if (unlikely(info.fd < 0))
			return info.fd;

		file = kcm_clone(sock);
		if (IS_ERR(file)) {
			put_unused_fd(info.fd);
			return PTR_ERR(file);
		}
		if (copy_to_user((void __user *)arg, &info,
				 sizeof(info))) {
			put_unused_fd(info.fd);
			fput(file);
			return -EFAULT;
		}
		fd_install(info.fd, file);
		err = 0;
		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
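
/* SIOCKCMCLONE gives the caller a second file descriptor on the same mux, so
 * multiple threads can send and receive messages concurrently. A hedged
 * userspace sketch; the kernel fills in the new descriptor:
 *
 *	struct kcm_clone clone_info = { 0 };
 *	ioctl(kcmfd, SIOCKCMCLONE, &clone_info);
 *	int kcmfd2 = clone_info.fd;	// second socket on the same mux
 */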

static void free_mux(struct rcu_head *rcu)
{
	struct kcm_mux *mux = container_of(rcu,
					   struct kcm_mux, rcu);

	kmem_cache_free(kcm_muxp, mux);
}

static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	call_rcu(&mux->rcu, free_mux);
}

static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		/* paired with lockless reads in kcm_rfree() */
		WRITE_ONCE(kcm->rx_wait, false);
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	sock_put(&kcm->sk);
}

/* Called by kcm_release to close a KCM socket.
 * If this is the last KCM socket on the MUX, destroy the MUX.
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	lock_sock(sk);
	sock_orphan(sk);
	kfree_skb(kcm->seq_skb);

	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
	 * get a write space callback. This prevents further work being queued
	 * from the callback (unbinding the psock occurs after canceling work).
	 */
	kcm->tx_stopped = 1;

	release_sock(sk);

	spin_lock_bh(&mux->lock);
	if (kcm->tx_wait) {
		/* Take off the tx_wait list; after this point there should be
		 * no way that a psock will be assigned to this kcm.
		 */
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
	}
	spin_unlock_bh(&mux->lock);

	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
	cancel_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;
	if (psock) {
		/* A psock was reserved, so we need to kill it since it
		 * may already have some bytes queued from a message. We
		 * need to do this after removing kcm from tx_wait list.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);
		unreserve_psock(kcm);
	}
	release_sock(sk);

	WARN_ON(kcm->tx_wait);
	WARN_ON(kcm->tx_psock);

	sock->sk = NULL;

	kcm_done(kcm);

	return 0;
}

static const struct proto_ops kcm_dgram_ops = {
	.family =	PF_KCM,
	.owner =	THIS_MODULE,
	.release =	kcm_release,
	.bind =		sock_no_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	sock_no_getname,
	.poll =		datagram_poll,
	.ioctl =	kcm_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	kcm_setsockopt,
	.getsockopt =	kcm_getsockopt,
	.sendmsg =	kcm_sendmsg,
	.recvmsg =	kcm_recvmsg,
	.mmap =		sock_no_mmap,
	.splice_eof =	kcm_splice_eof,
};

static const struct proto_ops kcm_seqpacket_ops = {
	.family =	PF_KCM,
	.owner =	THIS_MODULE,
	.release =	kcm_release,
	.bind =		sock_no_bind,
	.connect =	sock_no_connect,
	.socketpair =	sock_no_socketpair,
	.accept =	sock_no_accept,
	.getname =	sock_no_getname,
	.poll =		datagram_poll,
	.ioctl =	kcm_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	sock_no_shutdown,
	.setsockopt =	kcm_setsockopt,
	.getsockopt =	kcm_getsockopt,
	.sendmsg =	kcm_sendmsg,
	.recvmsg =	kcm_recvmsg,
	.mmap =		sock_no_mmap,
	.splice_eof =	kcm_splice_eof,
	.splice_read =	kcm_splice_read,
};

/* Create proto operation for kcm sockets */
static int kcm_create(struct net *net, struct socket *sock,
		      int protocol, int kern)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);
	struct sock *sk;
	struct kcm_mux *mux;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &kcm_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &kcm_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (protocol != KCMPROTO_CONNECTED)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Allocate a kcm mux, shared between KCM sockets */
	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
	if (!mux) {
		sk_free(sk);
		return -ENOMEM;
	}

	spin_lock_init(&mux->lock);
	spin_lock_init(&mux->rx_lock);
	INIT_LIST_HEAD(&mux->kcm_socks);
	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
	INIT_LIST_HEAD(&mux->kcm_tx_waiters);

	INIT_LIST_HEAD(&mux->psocks);
	INIT_LIST_HEAD(&mux->psocks_ready);
	INIT_LIST_HEAD(&mux->psocks_avail);

	mux->knet = knet;

	/* Add new MUX to list */
	mutex_lock(&knet->mutex);
	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
	knet->count++;
	mutex_unlock(&knet->mutex);

	skb_queue_head_init(&mux->rx_hold_queue);

	/* Init KCM socket */
	sock_init_data(sock, sk);
	init_kcm_sock(kcm_sk(sk), mux);

	return 0;
}

static const struct net_proto_family kcm_family_ops = {
	.family = PF_KCM,
	.create = kcm_create,
	.owner = THIS_MODULE,
};

static __net_init int kcm_init_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	INIT_LIST_HEAD_RCU(&knet->mux_list);
	mutex_init(&knet->mutex);

	return 0;
}

static __net_exit void kcm_exit_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	/* All KCM sockets should be closed at this point, which should mean
	 * that all multiplexors and psocks have been destroyed.
	 */
	WARN_ON(!list_empty(&knet->mux_list));

	mutex_destroy(&knet->mutex);
}

static struct pernet_operations kcm_net_ops = {
	.init = kcm_init_net,
	.exit = kcm_exit_net,
	.id   = &kcm_net_id,
	.size = sizeof(struct kcm_net),
};

static int __init kcm_init(void)
{
	int err = -ENOMEM;

	kcm_muxp = kmem_cache_create("kcm_mux_cache",
				     sizeof(struct kcm_mux), 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	if (!kcm_muxp)
		goto fail;

	kcm_psockp = kmem_cache_create("kcm_psock_cache",
				       sizeof(struct kcm_psock), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!kcm_psockp)
		goto fail;

	kcm_wq = create_singlethread_workqueue("kkcmd");
	if (!kcm_wq)
		goto fail;

	err = proto_register(&kcm_proto, 1);
	if (err)
		goto fail;

	err = register_pernet_device(&kcm_net_ops);
	if (err)
		goto net_ops_fail;

	err = sock_register(&kcm_family_ops);
	if (err)
		goto sock_register_fail;

	err = kcm_proc_init();
	if (err)
		goto proc_init_fail;

	return 0;

proc_init_fail:
	sock_unregister(PF_KCM);

sock_register_fail:
	unregister_pernet_device(&kcm_net_ops);

net_ops_fail:
	proto_unregister(&kcm_proto);

fail:
	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);

	if (kcm_wq)
		destroy_workqueue(kcm_wq);

	return err;
}

static void __exit kcm_exit(void)
{
	kcm_proc_exit();
	sock_unregister(PF_KCM);
	unregister_pernet_device(&kcm_net_ops);
	proto_unregister(&kcm_proto);
	destroy_workqueue(kcm_wq);

	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);
}

module_init(kcm_init);
module_exit(kcm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);