// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

static struct workqueue_struct *pm_wq;

/* path manager command handlers */

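/* Record the address the local PM wants to announce and flag it, so that an
 * ADD_ADDR option can be signalled at the next opportunity.
 */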
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr)
{
	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	msk->pm.local = *addr;
	WRITE_ONCE(msk->pm.addr_signal, true);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	return -ENOTSUPP;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
{
	return -ENOTSUPP;
}

/* path manager event handlers */

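/* Track whether this end acts as the server side of the MPTCP connection. */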
void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

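/* Account a new subflow against the per-connection limit; once the limit is
 * reached, stop accepting further subflow requests.
 */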
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool ret;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	ret = pm->subflows < pm->subflows_max;
	if (ret && ++pm->subflows == pm->subflows_max)
		WRITE_ONCE(pm->accept_subflow, false);
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	if (queue_work(pm_wq, &msk->pm.work))
		sock_hold((struct sock *)msk);
	return true;
}

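/* The connection is fully established: let the worker handle any pending PM
 * action, skipping the lock entirely when no work is pending.
 */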
void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

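/* An ADD_ADDR option has been received: stash the remote address and defer
 * the handling to the worker, if the PM still accepts new addresses.
 */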
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	/* avoid acquiring the lock if there is no room for further addresses */
	if (!READ_ONCE(pm->accept_addr))
		return;

	spin_lock_bh(&pm->lock);

	/* be sure there is something to signal, re-checking under the PM lock */
	if (READ_ONCE(pm->accept_addr) &&
	    mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->remote = *addr;

	spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

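/* Check whether an address must be signalled: if so, and if the remaining
 * TCP option space is large enough, copy it into @saddr and clear the
 * pending flag.
 */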
bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			  struct mptcp_addr_info *saddr)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_signal(msk))
		goto out_unlock;

	if (remaining < mptcp_add_addr_len(msk->pm.local.family))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, false);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return mptcp_pm_nl_get_local_id(msk, skc);
}

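/* Workqueue callback: process the PM events scheduled via
 * mptcp_pm_schedule_work(), under both the msk socket lock and the PM lock.
 */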
static void pm_worker(struct work_struct *work)
{
	struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
						work);
	struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
	struct sock *sk = (struct sock *)msk;

	lock_sock(sk);
	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
	release_sock(sk);
	sock_put(sk);
}

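/* Initialize the per-connection path manager state. */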
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_WORK(&msk->pm.work, pm_worker);

	mptcp_pm_nl_data_init(msk);
}

void mptcp_pm_close(struct mptcp_sock *msk)
{
	if (cancel_work_sync(&msk->pm.work))
		sock_put((struct sock *)msk);
}

void mptcp_pm_init(void)
{
	pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
	if (!pm_wq)
		panic("Failed to allocate workqueue");

	mptcp_pm_nl_init();
}