// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Link Layer Control (LLC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
 *             Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <net/tcp.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_llc.h"

#define SMC_LLC_DATA_LEN		40

struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;
	u8 length;	/* 44 */
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved:4,
	   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 add_link_rej_rsn:4,
	   reserved:4;
#endif
	u8 flags;
};

#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03

struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;
	u8 reserved[9];
};

#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1

#define SMC_LLC_ADD_LNK_MAX_LINKS	2

struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
	u8 flags2;	/* QP mtu */
	u8 initial_psn[3];
	u8 reserved[8];
};

#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20

struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;
	__be32 reason;
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */

struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];
	u8 reserved[24];
};
struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM RKEY msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	__be32 rmb_key;
	__be64 rmb_vaddr;
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_RKEYS_PER_MSG	3

struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};

struct smc_llc_msg_confirm_rkey_cont {	/* type 0x08 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
};

#define SMC_LLC_DEL_RKEY_MAX	8
#define SMC_LLC_FLAG_RKEY_RETRY	0x10
#define SMC_LLC_FLAG_RKEY_NEG	0x20

struct smc_llc_msg_delete_rkey {	/* type 0x09 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 err_mask;
	u8 reserved[2];
	__be32 rkey[8];
	u8 reserved2[4];
};

union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_confirm_rkey_cont confirm_rkey_cont;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};
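
/* union smc_llc_msg is the complete 44-byte wire format: the 4-byte
 * header plus SMC_LLC_DATA_LEN payload bytes. The rx/tx paths copy whole
 * buffers through the raw view, and the BUILD_BUG_ON checks in
 * smc_llc_add_pending_send() pin this size against the work request
 * buffer sizes at build time.
 */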

#define SMC_LLC_FLAG_RESP		0x80

struct smc_llc_qentry {
	struct list_head list;
	struct smc_link *link;
	union smc_llc_msg msg;
};

struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry = flow->qentry;

	flow->qentry = NULL;
	return qentry;
}

void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry;

	if (flow->qentry) {
		qentry = flow->qentry;
		flow->qentry = NULL;
		kfree(qentry);
	}
}

static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}

/* try to start a new llc flow, initiated by an incoming llc msg */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active */
		if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
		     qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
		    !lgr->delayed_event) {
			lgr->delayed_event = qentry;
		} else {
			/* forget this llc request */
			kfree(qentry);
		}
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	switch (qentry->msg.raw.hdr.common.type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	if (qentry == lgr->delayed_event)
		lgr->delayed_event = NULL;
	spin_unlock_bh(&lgr->llc_flow_lock);
	smc_llc_flow_qentry_set(flow, qentry);
	return true;
}
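
/* rough life cycle of the two flow kinds (cf. the rkey flows below):
 * remote-initiated: rx request -> smc_llc_flow_start() -> handle msg
 *		     -> smc_llc_flow_stop()
 * local:	     smc_llc_flow_initiate() -> send request
 *		     -> smc_llc_wait() for the response
 *		     -> smc_llc_flow_qentry_del() -> smc_llc_flow_stop()
 */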

/* start a new local llc flow, wait till current flow finished */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
	rc = wait_event_interruptible_timeout(lgr->llc_waiter,
			(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
			 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
			  lgr->llc_flow_rmt.type == allowed_remote)),
			SMC_LLC_WAIT_TIME);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}

/* finish the current llc flow */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		schedule_work(&lgr->llc_event_work);
	else
		wake_up_interruptible(&lgr->llc_waiter);
}

/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;

	wait_event_interruptible_timeout(lgr->llc_waiter,
					 (flow->qentry ||
					  (lnk && !smc_link_usable(lnk)) ||
					  list_empty(&lgr->list)),
					 time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    flow->qentry->msg.raw.hdr.common.type ==
		    SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}
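
/* a qentry returned by smc_llc_wait() stays owned by the local flow;
 * callers release it with smc_llc_flow_qentry_del(). NULL means timeout,
 * unusable link, dying link group, or an unexpected message type (a peer
 * DELETE_LINK arriving while waiting for ADD_LINK is rerouted into a
 * new flow instead)
 */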

/********************************** send *************************************/

struct smc_llc_tx_pend {
};

/* handler for send/transmission completion of an LLC msg */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}

/**
 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
 * @link: Pointer to SMC link used for sending LLC control message.
 * @wr_buf: Out variable returning pointer to work request payload buffer.
 * @pend: Out variable returning pointer to private pending WR tracking.
 *	It's the context the transmit complete handler will get.
 *
 * Reserves and pre-fills an entry for a pending work request send/tx.
 * Used by the mid-level senders to prepare for later actual send/tx.
 * Can sleep due to smc_wr_tx_get_free_slot (if not in softirq context).
 *
 * Return: 0 on success, otherwise an error value.
 */
static int smc_llc_add_pending_send(struct smc_link *link,
				    struct smc_wr_buf **wr_buf,
				    struct smc_wr_tx_pend_priv **pend)
{
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
				     pend);
	if (rc < 0)
		return rc;
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(union smc_llc_msg)");
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(union smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
	return 0;
}

/* high-level API to send LLC confirm link */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
	confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* enforce peer resp. */
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}
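
/* in the first-contact handshake the server sends the CONFIRM_LINK
 * request over the fresh RoCE link and the client answers with the
 * response flag set; smc_llc_eval_conf_link() below evaluates both
 * directions
 */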

/* send LLC confirm rkey request */
static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	int i, rc, rtok_ix;

	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
	if (rc)
		return rc;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
	rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);

	rtok_ix = 1;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		link = &send_link->lgr->lnk[i];
		if (link->state == SMC_LNK_ACTIVE && link != send_link) {
			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
			rkeyllc->rtoken[rtok_ix].rmb_key =
				htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
			rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
				(u64)sg_dma_address(
					rmb_desc->sgt[link->link_idx].sgl));
			rtok_ix++;
		}
	}
	/* rkey of send_link is in rtoken[0] */
	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
		(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(send_link, pend);
	return rc;
}
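
/* resulting rtoken layout (illustrative, two active links): rtoken[0]
 * carries num_rkeys = 1 plus the send_link rkey/vaddr, rtoken[1] carries
 * link_id/rkey/vaddr of the other active link; with only the send_link
 * active, num_rkeys is 0 and rtoken[1..2] stay zeroed
 */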

/* send LLC delete rkey request */
static int smc_llc_send_delete_rkey(struct smc_link *link,
				    struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_delete_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
	rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
	rkeyllc->num_rkeys = 1;
	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* prepare an add link message */
static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc,
				  struct smc_link *link, u8 mac[], u8 gid[],
				  enum smc_llc_reqresp reqresp)
{
	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.type = SMC_LLC_ADD_LINK;
	addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
	if (reqresp == SMC_LLC_RESP) {
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
		/* always reject more links for now */
		addllc->hd.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
		addllc->hd.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
	}
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
}

/* send ADD LINK request or response */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	addllc = (struct smc_llc_msg_add_link *)wr_buf;
	smc_llc_prep_add_link(addllc, link, mac, gid, reqresp);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* prepare a delete link message */
static void smc_llc_prep_delete_link(struct smc_llc_msg_del_link *delllc,
				     struct smc_link *link,
				     enum smc_llc_reqresp reqresp, bool orderly)
{
	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.type = SMC_LLC_DELETE_LINK;
	delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	/* DEL_LINK_ALL because only 1 link supported */
	delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	delllc->link_num = link->link_id;
}

/* send DELETE LINK request or response */
int smc_llc_send_delete_link(struct smc_link *link,
			     enum smc_llc_reqresp reqresp, bool orderly)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;
	smc_llc_prep_delete_link(delllc, link, reqresp, orderly);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send LLC test link request */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *testllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	testllc = (struct smc_llc_msg_test_link *)wr_buf;
	memset(testllc, 0, sizeof(*testllc));
	testllc->hd.common.type = SMC_LLC_TEST_LINK;
	testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* schedule an llc send on link, may wait for buffers */
static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_link_usable(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	return smc_wr_tx_send(link, pend);
}

/********************************* receive ***********************************/

static void smc_llc_rx_delete_link(struct smc_link *link,
				   struct smc_llc_msg_del_link *llc)
{
	struct smc_link_group *lgr = smc_get_lgr(link);

	smc_lgr_forget(lgr);
	smc_llc_link_deleting(link);
	if (lgr->role == SMC_SERV) {
		/* client asks to delete this link, send request */
		smc_llc_prep_delete_link(llc, link, SMC_LLC_REQ, true);
	} else {
		/* server requests to delete this link, send response */
		smc_llc_prep_delete_link(llc, link, SMC_LLC_RESP, true);
	}
	smc_llc_send_message(link, llc);
	smc_lgr_terminate_sched(lgr);
}

/* process a confirm_rkey request from peer, remote flow */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}

static void smc_llc_rx_confirm_rkey_cont(struct smc_link *link,
				      struct smc_llc_msg_confirm_rkey_cont *llc)
{
	/* ignore rtokens for other links, we have only one link */
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, llc);
}

static void smc_llc_rx_delete_rkey(struct smc_link *link,
				   struct smc_llc_msg_delete_rkey *llc)
{
	u8 err_mask = 0;
	int i, max;

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}

	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}

	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, llc);
}

/* flush the llc event queue */
static void smc_llc_event_flush(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry, *q;

	spin_lock_bh(&lgr->llc_event_q_lock);
	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
		list_del_init(&qentry->list);
		kfree(qentry);
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}

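/* dispatch an llc request from the event queue; the handler either frees
 * the qentry here or hands its ownership over to the flow machinery (the
 * early returns below), which releases it later
 */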
static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.type) {
	case SMC_LLC_TEST_LINK:
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up_interruptible(&lgr->llc_waiter);
			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						      qentry)) {
				/* tbd: schedule_work(&lgr->llc_add_link_work); */
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			/* tbd: schedule_work(&lgr->llc_add_link_work); */
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up_interruptible(&lgr->llc_waiter);
			return;
		}
		break;
	case SMC_LLC_DELETE_LINK:
		smc_llc_rx_delete_link(link, &llc->delete_link);
		break;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		smc_llc_rx_confirm_rkey_cont(link, &llc->confirm_rkey_cont);
		break;
	case SMC_LLC_DELETE_RKEY:
		smc_llc_rx_delete_rkey(link, &llc->delete_rkey);
		break;
	}
out:
	kfree(qentry);
}

/* worker to process llc messages on the event queue */
static void smc_llc_event_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_event_work);
	struct smc_llc_qentry *qentry;

	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
		if (smc_link_usable(lgr->delayed_event->link)) {
			smc_llc_event_handler(lgr->delayed_event);
		} else {
			qentry = lgr->delayed_event;
			lgr->delayed_event = NULL;
			kfree(qentry);
		}
	}

again:
	spin_lock_bh(&lgr->llc_event_q_lock);
	if (!list_empty(&lgr->llc_event_q)) {
		qentry = list_first_entry(&lgr->llc_event_q,
					  struct smc_llc_qentry, list);
		list_del_init(&qentry->list);
		spin_unlock_bh(&lgr->llc_event_q_lock);
		smc_llc_event_handler(qentry);
		goto again;
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}

/* process llc responses in tasklet context */
static void smc_llc_rx_response(struct smc_link *link,
				struct smc_llc_qentry *qentry)
{
	u8 llc_type = qentry->msg.raw.hdr.common.type;

	switch (llc_type) {
	case SMC_LLC_TEST_LINK:
		if (link->state == SMC_LNK_ACTIVE)
			complete(&link->llc_testlink_resp);
		break;
	case SMC_LLC_ADD_LINK:
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		/* assign responses to the local flow, we requested them */
		smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
		wake_up_interruptible(&link->lgr->llc_waiter);
		return;
	case SMC_LLC_DELETE_LINK:
		if (link->lgr->role == SMC_SERV)
			smc_lgr_schedule_free_work_fast(link->lgr);
		break;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* unused as long as we don't send this type of msg */
		break;
	}
	kfree(qentry);
}

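/* queue an llc request or dispatch an llc response; called from IB
 * completion (tasklet) context via smc_llc_rx_handler() below, hence the
 * GFP_ATOMIC allocation and the irqsave locking of the event queue
 */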
static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry;
	unsigned long flags;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return;
	qentry->link = link;
	INIT_LIST_HEAD(&qentry->list);
	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));

	/* process responses immediately */
	if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
		smc_llc_rx_response(link, qentry);
		return;
	}

	/* add requests to event queue */
	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
	list_add_tail(&qentry->list, &lgr->llc_event_q);
	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
	schedule_work(&link->lgr->llc_event_work);
}

/* copy received msg and add it to the event queue */
static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	union smc_llc_msg *llc = buf;

	if (wc->byte_len < sizeof(*llc))
		return;	/* short message */
	if (llc->raw.hdr.length != sizeof(*llc))
		return;	/* invalid message */

	smc_llc_enqueue(link, llc);
}

/***************************** worker, utils *********************************/

static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	if (link->state != SMC_LNK_ACTIVE)
		return;	/* don't reschedule worker */
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data);
	/* receive TEST LINK response over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (link->state != SMC_LNK_ACTIVE)
		return;	/* link state changed */
	if (rc <= 0) {
		smc_lgr_terminate_sched(smc_get_lgr(link));
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}

void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);

	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
	INIT_LIST_HEAD(&lgr->llc_event_q);
	spin_lock_init(&lgr->llc_event_q_lock);
	spin_lock_init(&lgr->llc_flow_lock);
	init_waitqueue_head(&lgr->llc_waiter);
	lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
}

/* called after lgr was removed from lgr_list */
void smc_llc_lgr_clear(struct smc_link_group *lgr)
{
	smc_llc_event_flush(lgr);
	wake_up_interruptible_all(&lgr->llc_waiter);
	cancel_work_sync(&lgr->llc_event_work);
	if (lgr->delayed_event) {
		kfree(lgr->delayed_event);
		lgr->delayed_event = NULL;
	}
}

int smc_llc_link_init(struct smc_link *link)
{
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	return 0;
}

void smc_llc_link_active(struct smc_link *link)
{
	link->state = SMC_LNK_ACTIVE;
	if (link->lgr->llc_testlink_time) {
		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
		schedule_delayed_work(&link->llc_testlink_wrk,
				      link->llc_testlink_time);
	}
}

void smc_llc_link_deleting(struct smc_link *link)
{
	link->state = SMC_LNK_DELETING;
	smc_wr_wakeup_tx_wait(link);
}

/* called in worker context */
void smc_llc_link_clear(struct smc_link *link)
{
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
	smc_wr_wakeup_reg_wait(link);
	smc_wr_wakeup_tx_wait(link);
}

/* register a new rtoken at the remote peer (for all links) */
int smc_llc_do_confirm_rkey(struct smc_link *send_link,
			    struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = send_link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive CONFIRM RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}

/* unregister an rtoken at the remote peer */
int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
			   struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_qentry *qentry = NULL;
	struct smc_link *send_link;
	int rc = 0;

	send_link = smc_llc_usable_link(lgr);
	if (!send_link)
		return -ENOLINK;

	rc = smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY);
	if (rc)
		return rc;
	/* protected by llc_flow control */
	rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive DELETE RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
	return rc;
}

/* evaluate confirm link request or response */
int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
			   enum smc_llc_reqresp type)
{
	if (type == SMC_LLC_REQ)	/* SMC server assigns link_id */
		qentry->link->link_id = qentry->msg.confirm_link.link_num;
	if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
		return -ENOTSUPP;
	return 0;
}

/***************************** init, exit, misc ******************************/

static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY
	},
	{
		.handler	= NULL,
	}
};

int __init smc_llc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}