/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_TAILROOM (TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_TAILROOM 16
#endif

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
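
/* Illustration: align() rounds a length up to the next 4-byte (word)
 * boundary, which is how TIPC pads bundled messages and message headers.
 * For example: align(0) == 0, align(5) == 8, align(24) == 24.
 */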

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Return: a new buffer with data pointers set to the specified size.
 *
 * NOTE:
 * Headroom is reserved to allow prepending of a data link header.
 * There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;
#ifdef CONFIG_TIPC_CRYPTO
	unsigned int buf_size = (BUF_HEADROOM + size + BUF_TAILROOM + 3) & ~3u;
#else
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
#endif

	skb = alloc_skb_fclone(buf_size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
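
/* Usage sketch (illustrative only; caller names are assumptions): most
 * callers allocate a buffer for a full header plus payload and then fill
 * it in, e.g.:
 *
 *	struct sk_buff *skb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_copy_to_linear_data(skb, hdr, MAX_H_SIZE);
 *
 * skb->data then points at 'size' writable bytes, with BUF_HEADROOM already
 * reserved in front for the bearer level header.
 */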

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	return buf;
}
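
/* Usage sketch (illustrative; the parameter values are assumptions): building
 * a small, self-contained control message in the style of the connection
 * probes sent by the socket layer:
 *
 *	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
 *			      peer_node, self_node, peer_port, own_port,
 *			      TIPC_OK);
 *	if (skb)
 *		tipc_node_xmit_skb(net, skb, peer_node, own_port);
 */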

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
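
/* Usage sketch (illustrative): the typical reassembly loop, essentially what
 * tipc_msg_assemble() further down in this file does.  'head' must start out
 * as NULL and fragments are fed in one at a time:
 *
 *	struct sk_buff *head = NULL;
 *
 *	while ((frag = next_fragment())) {	// hypothetical fragment source
 *		if (tipc_buf_append(&head, &frag))
 *			deliver(frag);		// frag is now the whole message
 *	}
 */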

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @mss: max allowable size of buffer
 * @dlen: size of data to be appended
 * @txq: queue to append to
 *
 * Return: the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
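
/* Note (illustrative): the return value is the number of additional
 * send-window "blocks" (the ~1k units counted by msg_blocks()) that this
 * call added to @txq, i.e. total - accounted, so a caller can update its
 * congestion accounting incrementally:
 *
 *	rc = tipc_msg_append(hdr, m, dlen, mss, txq);
 *	if (rc < 0)
 *		return rc;		// -ENOMEM or -EFAULT
 *	blocks_in_flight += rc;		// hypothetical caller-side counter
 */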

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
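
/* Illustration of the flow control ratio check above (numbers assumed):
 * a received buffer whose skb->truesize is 4096 but which carries only a
 * ~300 byte message has truesize / buf_roundup_len(skb) >= 4, so it is
 * replaced by a tight copy via skb_copy_expand() before validation, keeping
 * receive buffer accounting roughly proportional to the real payload.
 */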

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Return: 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
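
/* Worked example (numbers assumed): with pktmax = 1500 and INT_H_SIZE = 40,
 * each fragment carries at most dmax = 1460 bytes of the original message.
 * A message of dsz = 3000 bytes then yields nof_fragms = 3000 / 1460 + 1 = 3:
 * fragments 1 and 2 carry 1460 bytes each, the last one carries
 * 3000 % 1460 = 80 bytes, and each is prefixed with a fresh INT_H_SIZE header.
 */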

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages (FIXME)
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Return: message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz, FB_MTU, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
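
/* Note (illustrative): the FB_MTU fallback above only comes into play for
 * node local messages, where pktmax == MAX_MSG_SIZE and one very large
 * linear allocation may fail under memory pressure.  In that case the same
 * data is rebuilt as FB_MTU sized fragments and immediately merged again by
 * tipc_msg_assemble(), so the caller still ends up with a single message in
 * @list.
 */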

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Return: "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = align(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}
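
/* Worked example (sizes assumed): if the bundle currently holds bsz = 70
 * bytes, the next inner message is copied in at offset = align(70) = 72,
 * i.e. pad = 2 filler bytes keep every bundled message word aligned.  That
 * alignment is what lets tipc_msg_extract() below walk the bundle with
 * *pos += align(imsz).
 */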

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is a candidate for bundling, either
 * now or later; if bundling was actually performed in this call, the skb is
 * consumed and its pointer set to NULL.
 * Otherwise, "false" if the skb cannot be bundled at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* No last/target buffer yet? The new message may still be bundled later */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}

/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted; updated to the
 *       position of the next msg
 * Consumes outer buffer when last packet extracted
 * Return: true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
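
/* Usage sketch (illustrative): unwrapping a MSG_BUNDLER buffer.  The caller
 * keeps a running position and extracts until the call fails, at which point
 * the outer buffer has been consumed:
 *
 *	int pos = 0;
 *	struct sk_buff *iskb;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		process_inner(iskb);	// hypothetical per-message consumer
 */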

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb: buffer containing message to be reversed; will be consumed
 * @err: error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Return: true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+; the sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}
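
/* Usage sketch (illustrative): bouncing an undeliverable message back to its
 * sender, which is what tipc_skb_reject() at the end of this file does:
 *
 *	if (tipc_msg_reverse(tipc_own_addr(net), &skb, TIPC_ERR_NO_PORT))
 *		__skb_queue_tail(xmitq, skb);	// skb now holds the newly
 *						// built rejection message
 */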

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Return: true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 scope = msg_lookup_scope(msg);
	u32 self = tipc_own_addr(net);
	u32 inst = msg_nameinst(msg);
	struct tipc_socket_addr sk;
	struct tipc_uaddr ua;

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope,
		   msg_nametype(msg), inst, inst);
	sk.node = tipc_scope2node(net, scope);
	if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
		return false;
	msg_incr_reroute_cnt(msg);
	if (sk.node != self)
		msg_set_prevnode(msg, self);
	msg_set_destnode(msg, sk.node);
	msg_set_destport(msg, sk.ref);
	*err = TIPC_OK;

	return true;
}

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 * reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
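
/* Behavior note (illustrative): __tipc_skb_queue_sorted() keeps a deferred
 * receive queue ordered by 16-bit sequence number and silently drops
 * duplicates.  For example, inserting seqno 5 into a list holding 3, 4, 6
 * places it before 6 and returns true, while inserting another 4 frees the
 * skb and returns false.
 */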

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}