// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Link Layer Control (LLC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
 *             Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <net/tcp.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_llc.h"

#define SMC_LLC_DATA_LEN		40

struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;
	u8 length;	/* 44 */
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved:4,
	   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 add_link_rej_rsn:4,
	   reserved:4;
#endif
	u8 flags;
};
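
/* every LLC message occupies a fixed-size 44-byte WR buffer: the header
 * above followed by SMC_LLC_DATA_LEN bytes of type-specific payload; the
 * length field always carries 44
 */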

#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03

struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;
	u8 reserved[9];
};

#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1

#define SMC_LLC_ADD_LNK_MAX_LINKS	2

struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
	u8 flags2;	/* QP mtu */
	u8 initial_psn[3];
	u8 reserved[8];
};

#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20

struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;
	__be32 reason;
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */

struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];
	u8 reserved[24];
};

struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM RKEY msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	__be32 rmb_key;
	__be64 rmb_vaddr;
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_RKEYS_PER_MSG	3

struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};

struct smc_llc_msg_confirm_rkey_cont {	/* type 0x08 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
};

#define SMC_LLC_DEL_RKEY_MAX	8
#define SMC_LLC_FLAG_RKEY_NEG	0x20

struct smc_llc_msg_delete_rkey {	/* type 0x09 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 err_mask;
	u8 reserved[2];
	__be32 rkey[8];
	u8 reserved2[4];
};

union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_confirm_rkey_cont confirm_rkey_cont;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};
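
/* the raw view exposes the common header and the flat payload of any LLC
 * message, letting generic code check hdr.common.type without knowing the
 * concrete message type
 */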

#define SMC_LLC_FLAG_RESP		0x80

struct smc_llc_qentry {
	struct list_head list;
	struct smc_link *link;
	union smc_llc_msg msg;
};

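/* flow->qentry ownership: _clr() detaches the queued msg and hands it to
 * the caller, _del() detaches and frees it, _set() attaches a msg that is
 * then owned by the flow
 */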
struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry = flow->qentry;

	flow->qentry = NULL;
	return qentry;
}

void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry;

	if (flow->qentry) {
		qentry = flow->qentry;
		flow->qentry = NULL;
		kfree(qentry);
	}
}

static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}

/* try to start a new llc flow, initiated by an incoming llc msg */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active */
		if ((qentry->msg.raw.hdr.common.type == SMC_LLC_ADD_LINK ||
		     qentry->msg.raw.hdr.common.type == SMC_LLC_DELETE_LINK) &&
		    !lgr->delayed_event) {
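			/* park at most one ADD_LINK or DELETE_LINK request
			 * until the running flow finishes
			 */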
			lgr->delayed_event = qentry;
		} else {
			/* forget this llc request */
			kfree(qentry);
		}
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	switch (qentry->msg.raw.hdr.common.type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	if (qentry == lgr->delayed_event)
		lgr->delayed_event = NULL;
	spin_unlock_bh(&lgr->llc_flow_lock);
	smc_llc_flow_qentry_set(flow, qentry);
	return true;
}

/* start a new local llc flow, wait till current flow finished */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
	rc = wait_event_interruptible_timeout(lgr->llc_waiter,
			(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
			 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
			  lgr->llc_flow_rmt.type == allowed_remote)),
			SMC_LLC_WAIT_TIME);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}

/* finish the current llc flow */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		schedule_work(&lgr->llc_event_work);
	else
		wake_up_interruptible(&lgr->llc_waiter);
}
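
/* illustrative only - a local flow would typically be driven like this:
 *
 *	if (!smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY)) {
 *		... send an LLC request on the link ...
 *		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
 *				      SMC_LLC_CONFIRM_RKEY);
 *		... evaluate the response and release the qentry ...
 *		smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
 *	}
 */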

/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;

	wait_event_interruptible_timeout(lgr->llc_waiter,
					 (flow->qentry ||
					  (lnk && !smc_link_usable(lnk)) ||
					  list_empty(&lgr->list)),
					 time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	if (exp_msg && flow->qentry->msg.raw.hdr.common.type != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    flow->qentry->msg.raw.hdr.common.type ==
		    SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}

/********************************** send *************************************/

struct smc_llc_tx_pend {
};

/* handler for send/transmission completion of an LLC msg */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}

/**
 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
 * @link: Pointer to SMC link used for sending LLC control message.
 * @wr_buf: Out variable returning pointer to work request payload buffer.
 * @pend: Out variable returning pointer to private pending WR tracking.
 *	It's the context the transmit complete handler will get.
 *
 * Reserves and pre-fills an entry for a pending work request send/tx.
 * Used by mid-level smc_llc_send_message() to prepare for later actual
 * send/tx. Can sleep due to smc_get_ctrl_buf (if not in softirq context).
 *
 * Return: 0 on success, otherwise an error value.
 */
static int smc_llc_add_pending_send(struct smc_link *link,
				    struct smc_wr_buf **wr_buf,
				    struct smc_wr_tx_pend_priv **pend)
{
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
				     pend);
	if (rc < 0)
		return rc;
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
	return 0;
}
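
/* the BUILD_BUG_ONs above pin the fixed LLC message size to the smc_wr
 * buffer sizes at compile time, so a grown union smc_llc_msg is caught by
 * the build instead of overrunning WR buffers at runtime
 */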

/* high-level API to send LLC confirm link */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
	confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* enforce peer resp. */
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send LLC confirm rkey request */
static int smc_llc_send_confirm_rkey(struct smc_link *link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
	rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
		(u64)sg_dma_address(rmb_desc->sgt[link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send LLC delete rkey request */
static int smc_llc_send_delete_rkey(struct smc_link *link,
				    struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_delete_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
	rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
	rkeyllc->num_rkeys = 1;
	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* prepare an add link message */
static void smc_llc_prep_add_link(struct smc_llc_msg_add_link *addllc,
				  struct smc_link *link, u8 mac[], u8 gid[],
				  enum smc_llc_reqresp reqresp)
{
	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.type = SMC_LLC_ADD_LINK;
	addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
	if (reqresp == SMC_LLC_RESP) {
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
		/* always reject more links for now */
		addllc->hd.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
		addllc->hd.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
	}
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
}

/* send ADD LINK request or response */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	addllc = (struct smc_llc_msg_add_link *)wr_buf;
	smc_llc_prep_add_link(addllc, link, mac, gid, reqresp);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* prepare a delete link message */
static void smc_llc_prep_delete_link(struct smc_llc_msg_del_link *delllc,
				     struct smc_link *link,
				     enum smc_llc_reqresp reqresp, bool orderly)
{
	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.type = SMC_LLC_DELETE_LINK;
	delllc->hd.length = sizeof(struct smc_llc_msg_add_link);
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	/* DEL_LINK_ALL because only 1 link supported */
	delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	delllc->link_num = link->link_id;
}

/* send DELETE LINK request or response */
int smc_llc_send_delete_link(struct smc_link *link,
			     enum smc_llc_reqresp reqresp, bool orderly)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;
	smc_llc_prep_delete_link(delllc, link, reqresp, orderly);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send LLC test link request */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *testllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	testllc = (struct smc_llc_msg_test_link *)wr_buf;
	memset(testllc, 0, sizeof(*testllc));
	testllc->hd.common.type = SMC_LLC_TEST_LINK;
	testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* schedule an llc send on link, may wait for buffers */
static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_link_usable(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	return smc_wr_tx_send(link, pend);
}

/********************************* receive ***********************************/

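/* the rx handlers below respond in place: they flip SMC_LLC_FLAG_RESP (and
 * fill any response fields) in the received message, then send the modified
 * copy back through smc_llc_send_message()
 */
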
static void smc_llc_rx_delete_link(struct smc_link *link,
				   struct smc_llc_msg_del_link *llc)
{
	struct smc_link_group *lgr = smc_get_lgr(link);

	smc_lgr_forget(lgr);
	smc_llc_link_deleting(link);
	if (lgr->role == SMC_SERV) {
		/* client asks to delete this link, send request */
		smc_llc_prep_delete_link(llc, link, SMC_LLC_REQ, true);
	} else {
		/* server requests to delete this link, send response */
		smc_llc_prep_delete_link(llc, link, SMC_LLC_RESP, true);
	}
	smc_llc_send_message(link, llc);
	smc_lgr_terminate_sched(lgr);
}

static void smc_llc_rx_test_link(struct smc_link *link,
				 struct smc_llc_msg_test_link *llc)
{
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, llc);
}

static void smc_llc_rx_confirm_rkey(struct smc_link *link,
				    struct smc_llc_msg_confirm_rkey *llc)
{
	int rc;

	rc = smc_rtoken_add(link,
			    llc->rtoken[0].rmb_vaddr,
			    llc->rtoken[0].rmb_key);

	/* ignore rtokens for other links, we have only one link */

	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (rc < 0)
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	smc_llc_send_message(link, llc);
}

static void smc_llc_rx_confirm_rkey_cont(struct smc_link *link,
				      struct smc_llc_msg_confirm_rkey_cont *llc)
{
	/* ignore rtokens for other links, we have only one link */
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, llc);
}

static void smc_llc_rx_delete_rkey(struct smc_link *link,
				   struct smc_llc_msg_delete_rkey *llc)
{
	u8 err_mask = 0;
	int i, max;

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
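	/* err_mask reports failed deletions MSB-first: bit 7 corresponds to
	 * rkey[0], bit 6 to rkey[1], and so on
	 */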
	for (i = 0; i < max; i++) {
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}

	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}

	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, llc);
}

/* flush the llc event queue */
static void smc_llc_event_flush(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry, *q;

	spin_lock_bh(&lgr->llc_event_q_lock);
	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
		list_del_init(&qentry->list);
		kfree(qentry);
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}

static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.type) {
	case SMC_LLC_TEST_LINK:
		smc_llc_rx_test_link(link, &llc->test_link);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up_interruptible(&lgr->llc_waiter);
			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						      qentry)) {
				/* tbd: schedule_work(&lgr->llc_add_link_work); */
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			/* tbd: schedule_work(&lgr->llc_add_link_work); */
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up_interruptible(&lgr->llc_waiter);
			return;
		}
		break;
	case SMC_LLC_DELETE_LINK:
		smc_llc_rx_delete_link(link, &llc->delete_link);
		break;
	case SMC_LLC_CONFIRM_RKEY:
		smc_llc_rx_confirm_rkey(link, &llc->confirm_rkey);
		break;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		smc_llc_rx_confirm_rkey_cont(link, &llc->confirm_rkey_cont);
		break;
	case SMC_LLC_DELETE_RKEY:
		smc_llc_rx_delete_rkey(link, &llc->delete_rkey);
		break;
	}
out:
	kfree(qentry);
}

/* worker to process llc messages on the event queue */
static void smc_llc_event_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_event_work);
	struct smc_llc_qentry *qentry;

	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
		if (smc_link_usable(lgr->delayed_event->link)) {
			smc_llc_event_handler(lgr->delayed_event);
		} else {
			qentry = lgr->delayed_event;
			lgr->delayed_event = NULL;
			kfree(qentry);
		}
	}

again:
	spin_lock_bh(&lgr->llc_event_q_lock);
	if (!list_empty(&lgr->llc_event_q)) {
		qentry = list_first_entry(&lgr->llc_event_q,
					  struct smc_llc_qentry, list);
		list_del_init(&qentry->list);
		spin_unlock_bh(&lgr->llc_event_q_lock);
		smc_llc_event_handler(qentry);
		goto again;
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}
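
/* the single worker above drains the event queue one entry at a time, so
 * LLC requests are serialized per link group and reach the handlers in
 * reception order
 */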

/* process llc responses in tasklet context */
static void smc_llc_rx_response(struct smc_link *link,
				struct smc_llc_qentry *qentry)
{
	u8 llc_type = qentry->msg.raw.hdr.common.type;
	union smc_llc_msg *llc = &qentry->msg;

	switch (llc_type) {
	case SMC_LLC_TEST_LINK:
		if (link->state == SMC_LNK_ACTIVE)
			complete(&link->llc_testlink_resp);
		break;
	case SMC_LLC_ADD_LINK:
	case SMC_LLC_CONFIRM_LINK:
		/* assign responses to the local flow, we requested them */
		smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
		wake_up_interruptible(&link->lgr->llc_waiter);
		return;
	case SMC_LLC_DELETE_LINK:
		if (link->lgr->role == SMC_SERV)
			smc_lgr_schedule_free_work_fast(link->lgr);
		break;
	case SMC_LLC_CONFIRM_RKEY:
		link->llc_confirm_rkey_resp_rc = llc->raw.hdr.flags &
						 SMC_LLC_FLAG_RKEY_NEG;
		complete(&link->llc_confirm_rkey_resp);
		break;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* unused as long as we don't send this type of msg */
		break;
	case SMC_LLC_DELETE_RKEY:
		link->llc_delete_rkey_resp_rc = llc->raw.hdr.flags &
						SMC_LLC_FLAG_RKEY_NEG;
		complete(&link->llc_delete_rkey_resp);
		break;
	}
	kfree(qentry);
}

static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry;
	unsigned long flags;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return;
	qentry->link = link;
	INIT_LIST_HEAD(&qentry->list);
	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));

	/* process responses immediately */
	if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
		smc_llc_rx_response(link, qentry);
		return;
	}

	/* add requests to event queue */
	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
	list_add_tail(&qentry->list, &lgr->llc_event_q);
	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
	schedule_work(&link->lgr->llc_event_work);
}

/* copy received msg and add it to the event queue */
static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	union smc_llc_msg *llc = buf;

	if (wc->byte_len < sizeof(*llc))
		return; /* short message */
	if (llc->raw.hdr.length != sizeof(*llc))
		return; /* invalid message */

	smc_llc_enqueue(link, llc);
}

/***************************** worker, utils *********************************/

static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	if (link->state != SMC_LNK_ACTIVE)
		return;	/* don't reschedule worker */
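	/* probe the link only if it has been idle for llc_testlink_time;
	 * otherwise just re-arm the worker for the remaining time
	 */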
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data);
	/* receive TEST LINK response over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (link->state != SMC_LNK_ACTIVE)
		return;	/* link state changed */
	if (rc <= 0) {
		smc_lgr_terminate_sched(smc_get_lgr(link));
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}

void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);

	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
	INIT_LIST_HEAD(&lgr->llc_event_q);
	spin_lock_init(&lgr->llc_event_q_lock);
	spin_lock_init(&lgr->llc_flow_lock);
	init_waitqueue_head(&lgr->llc_waiter);
	lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
}

/* called after lgr was removed from lgr_list */
void smc_llc_lgr_clear(struct smc_link_group *lgr)
{
	smc_llc_event_flush(lgr);
	wake_up_interruptible_all(&lgr->llc_waiter);
	cancel_work_sync(&lgr->llc_event_work);
	if (lgr->delayed_event) {
		kfree(lgr->delayed_event);
		lgr->delayed_event = NULL;
	}
}

int smc_llc_link_init(struct smc_link *link)
{
	init_completion(&link->llc_confirm_rkey_resp);
	init_completion(&link->llc_delete_rkey_resp);
	mutex_init(&link->llc_delete_rkey_mutex);
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	return 0;
}

void smc_llc_link_active(struct smc_link *link)
{
	link->state = SMC_LNK_ACTIVE;
	if (link->lgr->llc_testlink_time) {
		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
		schedule_delayed_work(&link->llc_testlink_wrk,
				      link->llc_testlink_time);
	}
}

void smc_llc_link_deleting(struct smc_link *link)
{
	link->state = SMC_LNK_DELETING;
	smc_wr_wakeup_tx_wait(link);
}

/* called in worker context */
void smc_llc_link_clear(struct smc_link *link)
{
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
	smc_wr_wakeup_reg_wait(link);
	smc_wr_wakeup_tx_wait(link);
}

/* register a new rtoken at the remote peer */
int smc_llc_do_confirm_rkey(struct smc_link *link,
			    struct smc_buf_desc *rmb_desc)
{
	int rc;

	/* protected by mutex smc_create_lgr_pending */
	reinit_completion(&link->llc_confirm_rkey_resp);
	rc = smc_llc_send_confirm_rkey(link, rmb_desc);
	if (rc)
		return rc;
	/* receive CONFIRM RKEY response from server over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_rkey_resp, SMC_LLC_WAIT_TIME);
	if (rc <= 0 || link->llc_confirm_rkey_resp_rc)
		return -EFAULT;
	return 0;
}

/* unregister an rtoken at the remote peer */
int smc_llc_do_delete_rkey(struct smc_link *link,
			   struct smc_buf_desc *rmb_desc)
{
	int rc = 0;

	mutex_lock(&link->llc_delete_rkey_mutex);
	if (link->state != SMC_LNK_ACTIVE)
		goto out;
	reinit_completion(&link->llc_delete_rkey_resp);
	rc = smc_llc_send_delete_rkey(link, rmb_desc);
	if (rc)
		goto out;
	/* receive DELETE RKEY response from server over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(
		&link->llc_delete_rkey_resp, SMC_LLC_WAIT_TIME);
	if (rc <= 0 || link->llc_delete_rkey_resp_rc)
		rc = -EFAULT;
	else
		rc = 0;
out:
	mutex_unlock(&link->llc_delete_rkey_mutex);
	return rc;
}
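
/* both do_*_rkey helpers are synchronous: they send the request and block
 * for up to SMC_LLC_WAIT_TIME until smc_llc_rx_response() completes the
 * corresponding completion, mapping a negative response or a timeout to
 * -EFAULT
 */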

/* evaluate confirm link request or response */
int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
			   enum smc_llc_reqresp type)
{
	if (type == SMC_LLC_REQ)	/* SMC server assigns link_id */
		qentry->link->link_id = qentry->msg.confirm_link.link_num;
	if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
		return -ENOTSUPP;
	return 0;
}

/***************************** init, exit, misc ******************************/

static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY
	},
	{
		.handler	= NULL,
	}
};

int __init smc_llc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}