xref: /openbmc/linux/net/mptcp/pm.c (revision 26721b02)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Multipath TCP
3  *
4  * Copyright (c) 2019, Intel Corporation.
5  */
6 #define pr_fmt(fmt) "MPTCP: " fmt
7 
8 #include <linux/kernel.h>
9 #include <net/tcp.h>
10 #include <net/mptcp.h>
11 #include "protocol.h"
12 
13 /* path manager command handlers */
14 
/* Record @addr as the local address to advertise via an ADD_ADDR option.
 *
 * pm.local is populated before addr_signal is raised: the flag is read
 * lockless-ly (see the READ_ONCE-style double check in
 * mptcp_pm_addr_signal()), so the address must be visible first.
 * Always returns 0.
 */
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr)
{
	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	msk->pm.local = *addr;
	WRITE_ONCE(msk->pm.addr_signal, true);
	return 0;
}
24 
25 int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
26 {
27 	return -ENOTSUPP;
28 }
29 
30 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
31 {
32 	return -ENOTSUPP;
33 }
34 
35 /* path manager event handlers */
36 
/* PM hook invoked for every new MPTCP connection: note which side of the
 * connection we sit on (server_side is presumably non-zero for passively
 * accepted connections — confirm against the caller).
 */
void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	/* WRITE_ONCE: the field may be read without holding pm->lock */
	WRITE_ONCE(pm->server_side, server_side);
}
45 
/* Decide whether one more subflow may join this connection.
 *
 * Returns true and consumes one subflow slot while the count is below
 * subflows_max. When the slot taken here is the last one, accept_subflow
 * is cleared so that later callers can bail out on the lockless fast
 * path without touching pm->lock.
 */
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	int ret;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	/* re-evaluate under the lock: the lockless check above can race
	 * with a concurrent subflow being admitted
	 */
	ret = pm->subflows < pm->subflows_max;
	if (ret && ++pm->subflows == pm->subflows_max)
		WRITE_ONCE(pm->accept_subflow, false);
	spin_unlock_bh(&pm->lock);

	return ret;
}
66 
/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 *
 * All callers in this file invoke it with pm->lock held. On the 0->1
 * transition of the status bit the msk worker is scheduled; a socket
 * reference is taken only when schedule_work() actually queued the work
 * (the matching sock_put() presumably lives in the worker — see
 * protocol.c).
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	/* event already pending: nothing to do */
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	if (schedule_work(&msk->work))
		sock_hold((struct sock *)msk);
	return true;
}
83 
/* PM event: the MPTCP connection is now fully established. Queue the
 * MPTCP_PM_ESTABLISHED work item if the PM still has pending actions.
 */
void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* re-check under the PM lock: work_pending may have been cleared
	 * concurrently since the lockless test above
	 */
	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
101 
/* PM event: the MPTCP connection has been closed. Currently only traced;
 * no PM state is torn down here.
 */
void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}
106 
/* PM event: an additional subflow reached the established state. Queue
 * the MPTCP_PM_SUBFLOW_ESTABLISHED work item if the PM still has pending
 * actions. @subflow is currently unused here; the worker consumes the
 * event.
 */
void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* lockless fast path: skip the lock when no PM work is pending */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	/* re-check under the PM lock before scheduling the worker */
	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}
124 
/* PM event: the subflow with local address @id has been closed. Currently
 * only traced; no accounting is updated here.
 */
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}
129 
/* PM event: the peer announced a new address via ADD_ADDR. Stash it in
 * pm->remote and queue the MPTCP_PM_ADD_ADDR_RECEIVED work item; only
 * one received address is held at a time, and it is recorded only when
 * this call actually raised the (previously clear) status bit, so an
 * address already being processed is not overwritten.
 */
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	/* avoid acquiring the lock if there is no room for further addresses */
	if (!READ_ONCE(pm->accept_addr))
		return;

	spin_lock_bh(&pm->lock);

	/* be sure there is something to signal re-checking under PM lock */
	if (READ_ONCE(pm->accept_addr) &&
	    mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->remote = *addr;

	spin_unlock_bh(&pm->lock);
}
151 
152 /* path manager helpers */
153 
154 bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
155 			  struct mptcp_addr_info *saddr)
156 {
157 	int ret = false;
158 
159 	spin_lock_bh(&msk->pm.lock);
160 
161 	/* double check after the lock is acquired */
162 	if (!mptcp_pm_should_signal(msk))
163 		goto out_unlock;
164 
165 	if (remaining < mptcp_add_addr_len(msk->pm.local.family))
166 		goto out_unlock;
167 
168 	*saddr = msk->pm.local;
169 	WRITE_ONCE(msk->pm.addr_signal, false);
170 	ret = true;
171 
172 out_unlock:
173 	spin_unlock_bh(&msk->pm.lock);
174 	return ret;
175 }
176 
/* Map @skc to a local address id; thin wrapper delegating to the
 * netlink PM backend.
 */
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}
181 
/* Reset the per-connection PM state for a freshly created msk: zero all
 * counters, clear every lockless-read flag, init the PM lock, then let
 * the netlink PM backend set its own defaults (limits, accept_* flags).
 */
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	/* these flags are read without pm.lock elsewhere in this file */
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);

	mptcp_pm_nl_data_init(msk);
}
198 
/* One-time boot-time PM setup; currently just initializes the netlink
 * PM backend.
 */
void __init mptcp_pm_init(void)
{
	mptcp_pm_nl_init();
}
203