/*
 *   fs/cifs/smb2misc.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2011
 *                 Etersoft, 2012
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Pavel Shilovsky (pshilovsky@samba.org) 2012
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/ctype.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "nterr.h"

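/*
 * Sanity check a single response header: verify the SMB2 protocol id,
 * that the frame is a response (or a server-initiated oplock break, the
 * only request the server legitimately sends us), and that the message
 * id matches the one we expect.  Returns 0 if the header looks valid,
 * 1 otherwise.
 */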
static int
check_smb2_hdr(struct smb2_sync_hdr *shdr, __u64 mid)
{
        __u64 wire_mid = le64_to_cpu(shdr->MessageId);

        /*
         * Make sure that this really is an SMB, that it is a response,
         * and that the message ids match.
         */
        if ((shdr->ProtocolId == SMB2_PROTO_NUMBER) &&
            (mid == wire_mid)) {
                if (shdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
                        return 0;
                else {
                        /* only one valid case where server sends us request */
                        if (shdr->Command == SMB2_OPLOCK_BREAK)
                                return 0;
                        else
                                cifs_dbg(VFS, "Received Request not response\n");
                }
        } else { /* bad signature or mid */
                if (shdr->ProtocolId != SMB2_PROTO_NUMBER)
                        cifs_dbg(VFS, "Bad protocol string signature header %x\n",
                                 le32_to_cpu(shdr->ProtocolId));
                if (mid != wire_mid)
                        cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
                                 mid, wire_mid);
        }
        cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
        return 1;
}

/*
 * The following table defines the expected "StructureSize" of SMB2 responses
 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS responses.
 *
 * Note that commands are defined in smb2pdu.h in le16 but the array below is
 * indexed by command in host byte order
 */
static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
        /* SMB2_NEGOTIATE */ cpu_to_le16(65),
        /* SMB2_SESSION_SETUP */ cpu_to_le16(9),
        /* SMB2_LOGOFF */ cpu_to_le16(4),
        /* SMB2_TREE_CONNECT */ cpu_to_le16(16),
        /* SMB2_TREE_DISCONNECT */ cpu_to_le16(4),
        /* SMB2_CREATE */ cpu_to_le16(89),
        /* SMB2_CLOSE */ cpu_to_le16(60),
        /* SMB2_FLUSH */ cpu_to_le16(4),
        /* SMB2_READ */ cpu_to_le16(17),
        /* SMB2_WRITE */ cpu_to_le16(17),
        /* SMB2_LOCK */ cpu_to_le16(4),
        /* SMB2_IOCTL */ cpu_to_le16(49),
        /* BB CHECK this ... not listed in documentation */
        /* SMB2_CANCEL */ cpu_to_le16(0),
        /* SMB2_ECHO */ cpu_to_le16(4),
        /* SMB2_QUERY_DIRECTORY */ cpu_to_le16(9),
        /* SMB2_CHANGE_NOTIFY */ cpu_to_le16(9),
        /* SMB2_QUERY_INFO */ cpu_to_le16(9),
        /* SMB2_SET_INFO */ cpu_to_le16(2),
        /* BB FIXME can also be 44 for lease break */
        /* SMB2_OPLOCK_BREAK */ cpu_to_le16(24)
};

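/*
 * Work out how many bytes of negotiate contexts (including any padding
 * between the security blob and the first context) follow an SMB3.1.1
 * negotiate response, so the frame length check can account for them.
 * Returns 0 for earlier dialects or if the context offset looks bogus.
 */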
static __u32 get_neg_ctxt_len(struct smb2_sync_hdr *hdr, __u32 len,
                              __u32 non_ctxlen)
{
        __u16 neg_count;
        __u32 nc_offset, size_of_pad_before_neg_ctxts;
        struct smb2_negotiate_rsp *pneg_rsp = (struct smb2_negotiate_rsp *)hdr;

        /* Negotiate contexts are only valid for latest dialect SMB3.11 */
        neg_count = le16_to_cpu(pneg_rsp->NegotiateContextCount);
        if ((neg_count == 0) ||
            (pneg_rsp->DialectRevision != cpu_to_le16(SMB311_PROT_ID)))
                return 0;

        /* Make sure that negotiate contexts start after gss security blob */
        nc_offset = le32_to_cpu(pneg_rsp->NegotiateContextOffset);
        if (nc_offset < non_ctxlen) {
                pr_warn_once("Invalid negotiate context offset\n");
                return 0;
        }
        size_of_pad_before_neg_ctxts = nc_offset - non_ctxlen;

        /* Verify that at least minimal negotiate contexts fit within frame */
        if (len < nc_offset + (neg_count * sizeof(struct smb2_neg_context))) {
                pr_warn_once("negotiate context goes beyond end\n");
                return 0;
        }

        cifs_dbg(FYI, "length of negcontexts %d pad %d\n",
                 len - nc_offset, size_of_pad_before_neg_ctxts);

        /* length of negcontexts including pad from end of sec blob to them */
        return (len - nc_offset) + size_of_pad_before_neg_ctxts;
}

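/*
 * Validate a complete SMB2/SMB3 response frame: header sanity, known
 * command, expected StructureSize2 for that command, and a total length
 * that matches what we calculate from the fixed and variable parts
 * (allowing for a few well known server quirks).  Returns 0 if the frame
 * looks valid, 1 if it is malformed.
 */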
int
smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr)
{
        struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)buf;
        struct smb2_sync_pdu *pdu = (struct smb2_sync_pdu *)shdr;
        __u64 mid;
        __u32 clc_len;  /* calculated length */
        int command;
        int pdu_size = sizeof(struct smb2_sync_pdu);
        int hdr_size = sizeof(struct smb2_sync_hdr);

        /*
         * Add function to do table lookup of StructureSize by command
         * ie Validate the wct via smb2_struct_sizes table above
         */
        if (shdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
                struct smb2_transform_hdr *thdr =
                        (struct smb2_transform_hdr *)buf;
                struct cifs_ses *ses = NULL;
                struct list_head *tmp;

                /* decrypt frame now that it is completely read in */
                spin_lock(&cifs_tcp_ses_lock);
                list_for_each(tmp, &srvr->smb_ses_list) {
                        ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
                        if (ses->Suid == thdr->SessionId)
                                break;

                        ses = NULL;
                }
                spin_unlock(&cifs_tcp_ses_lock);
                if (ses == NULL) {
                        cifs_dbg(VFS, "no decryption - session id not found\n");
                        return 1;
                }
        }

        mid = le64_to_cpu(shdr->MessageId);
        if (len < pdu_size) {
                if ((len >= hdr_size)
                    && (shdr->Status != 0)) {
                        pdu->StructureSize2 = 0;
                        /*
                         * As with SMB/CIFS, on some error cases servers may
                         * not return wct properly
                         */
                        return 0;
                } else {
                        cifs_dbg(VFS, "Length less than SMB header size\n");
                }
                return 1;
        }
        if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE) {
                cifs_dbg(VFS, "SMB length greater than maximum, mid=%llu\n",
                         mid);
                return 1;
        }

        if (check_smb2_hdr(shdr, mid))
                return 1;

        if (shdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
                cifs_dbg(VFS, "Invalid structure size %u\n",
                         le16_to_cpu(shdr->StructureSize));
                return 1;
        }

        command = le16_to_cpu(shdr->Command);
        if (command >= NUMBER_OF_SMB2_COMMANDS) {
                cifs_dbg(VFS, "Invalid SMB2 command %d\n", command);
                return 1;
        }

        if (smb2_rsp_struct_sizes[command] != pdu->StructureSize2) {
                if (command != SMB2_OPLOCK_BREAK_HE && (shdr->Status == 0 ||
                    pdu->StructureSize2 != SMB2_ERROR_STRUCTURE_SIZE2)) {
                        /* error packets have 9 byte structure size */
                        cifs_dbg(VFS, "Invalid response size %u for command %d\n",
                                 le16_to_cpu(pdu->StructureSize2), command);
                        return 1;
                } else if (command == SMB2_OPLOCK_BREAK_HE
                           && (shdr->Status == 0)
                           && (le16_to_cpu(pdu->StructureSize2) != 44)
                           && (le16_to_cpu(pdu->StructureSize2) != 36)) {
                        /* special case for SMB2.1 lease break message */
                        cifs_dbg(VFS, "Invalid response size %d for oplock break\n",
                                 le16_to_cpu(pdu->StructureSize2));
                        return 1;
                }
        }

        clc_len = smb2_calc_size(buf, srvr);

        if (shdr->Command == SMB2_NEGOTIATE)
                clc_len += get_neg_ctxt_len(shdr, len, clc_len);

        if (len != clc_len) {
                cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n",
                         clc_len, len, mid);
                /* create failed on symlink */
                if (command == SMB2_CREATE_HE &&
                    shdr->Status == STATUS_STOPPED_ON_SYMLINK)
                        return 0;
                /* Windows 7 server returns 24 bytes more */
                if (clc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE)
                        return 0;
                /* server can return one byte more due to implied bcc[0] */
                if (clc_len == len + 1)
                        return 0;

                /*
                 * Some windows servers (win2016) will pad also the final
                 * PDU in a compound to 8 bytes.
                 */
                if (((clc_len + 7) & ~7) == len)
                        return 0;

                /*
                 * MacOS server pads after SMB2.1 write response with 3 bytes
                 * of junk. Other servers match RFC1001 len to actual
                 * SMB2/SMB3 frame length (header + smb2 response specific data)
                 * Some windows servers also pad up to 8 bytes when compounding.
                 */
                if (clc_len < len)
                        return 0;

                pr_warn_once(
                        "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n",
                        len, clc_len, command, mid);

                return 1;
        }
        return 0;
}

/*
 * The size of the variable area depends on the offset and length fields
 * located in different fields for various SMB2 responses. SMB2 responses
 * with no variable length info show an offset of zero for the offset field.
 */
static const bool has_smb2_data_area[NUMBER_OF_SMB2_COMMANDS] = {
        /* SMB2_NEGOTIATE */ true,
        /* SMB2_SESSION_SETUP */ true,
        /* SMB2_LOGOFF */ false,
        /* SMB2_TREE_CONNECT */ false,
        /* SMB2_TREE_DISCONNECT */ false,
        /* SMB2_CREATE */ true,
        /* SMB2_CLOSE */ false,
        /* SMB2_FLUSH */ false,
        /* SMB2_READ */ true,
        /* SMB2_WRITE */ false,
        /* SMB2_LOCK */ false,
        /* SMB2_IOCTL */ true,
        /* SMB2_CANCEL */ false, /* BB CHECK this not listed in documentation */
        /* SMB2_ECHO */ false,
        /* SMB2_QUERY_DIRECTORY */ true,
        /* SMB2_CHANGE_NOTIFY */ true,
        /* SMB2_QUERY_INFO */ true,
        /* SMB2_SET_INFO */ false,
        /* SMB2_OPLOCK_BREAK */ false
};

/*
 * Returns the pointer to the beginning of the data area. The length of
 * the data area and the offset to it (from the beginning of the SMB) are
 * also returned.
 */
char *
smb2_get_data_area_len(int *off, int *len, struct smb2_sync_hdr *shdr)
{
        *off = 0;
        *len = 0;

        /* error responses do not have data area */
        if (shdr->Status && shdr->Status != STATUS_MORE_PROCESSING_REQUIRED &&
            (((struct smb2_err_rsp *)shdr)->StructureSize) ==
                                                SMB2_ERROR_STRUCTURE_SIZE2)
                return NULL;

        /*
         * Following commands have data areas so we have to get the location
         * of the data buffer offset and data buffer length for the particular
         * command.
         */
        switch (shdr->Command) {
        case SMB2_NEGOTIATE:
                *off = le16_to_cpu(
                    ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferOffset);
                *len = le16_to_cpu(
                    ((struct smb2_negotiate_rsp *)shdr)->SecurityBufferLength);
                break;
        case SMB2_SESSION_SETUP:
                *off = le16_to_cpu(
                    ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferOffset);
                *len = le16_to_cpu(
                    ((struct smb2_sess_setup_rsp *)shdr)->SecurityBufferLength);
                break;
        case SMB2_CREATE:
                *off = le32_to_cpu(
                    ((struct smb2_create_rsp *)shdr)->CreateContextsOffset);
                *len = le32_to_cpu(
                    ((struct smb2_create_rsp *)shdr)->CreateContextsLength);
                break;
        case SMB2_QUERY_INFO:
                *off = le16_to_cpu(
                    ((struct smb2_query_info_rsp *)shdr)->OutputBufferOffset);
                *len = le32_to_cpu(
                    ((struct smb2_query_info_rsp *)shdr)->OutputBufferLength);
                break;
        case SMB2_READ:
                /* TODO: is this a bug ? */
                *off = ((struct smb2_read_rsp *)shdr)->DataOffset;
                *len = le32_to_cpu(((struct smb2_read_rsp *)shdr)->DataLength);
                break;
        case SMB2_QUERY_DIRECTORY:
                *off = le16_to_cpu(
                  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferOffset);
                *len = le32_to_cpu(
                  ((struct smb2_query_directory_rsp *)shdr)->OutputBufferLength);
                break;
        case SMB2_IOCTL:
                *off = le32_to_cpu(
                  ((struct smb2_ioctl_rsp *)shdr)->OutputOffset);
                *len = le32_to_cpu(
                  ((struct smb2_ioctl_rsp *)shdr)->OutputCount);
                break;
        case SMB2_CHANGE_NOTIFY:
                *off = le16_to_cpu(
                  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferOffset);
                *len = le32_to_cpu(
                  ((struct smb2_change_notify_rsp *)shdr)->OutputBufferLength);
                break;
        default:
                cifs_dbg(VFS, "no length check for command %d\n", le16_to_cpu(shdr->Command));
                break;
        }

        /*
         * Invalid length or offset probably means data area is invalid, but
         * we have little choice but to ignore the data area in this case.
         */
        if (*off > 4096) {
                cifs_dbg(VFS, "offset %d too large, data area ignored\n", *off);
                *len = 0;
                *off = 0;
        } else if (*off < 0) {
                cifs_dbg(VFS, "negative offset %d to data invalid ignore data area\n",
                         *off);
                *off = 0;
                *len = 0;
        } else if (*len < 0) {
                cifs_dbg(VFS, "negative data length %d invalid, data area ignored\n",
                         *len);
                *len = 0;
        } else if (*len > 128 * 1024) {
                cifs_dbg(VFS, "data area larger than 128K: %d\n", *len);
                *len = 0;
        }

        /* return pointer to beginning of data area, ie offset from SMB start */
        if ((*off != 0) && (*len != 0))
                return (char *)shdr + *off;
        else
                return NULL;
}

/*
 * Calculate the size of the SMB message based on the fixed header
 * portion, the number of word parameters and the data portion of the message.
 */
unsigned int
smb2_calc_size(void *buf, struct TCP_Server_Info *srvr)
{
        struct smb2_sync_pdu *pdu = (struct smb2_sync_pdu *)buf;
        struct smb2_sync_hdr *shdr = &pdu->sync_hdr;
        int offset; /* the offset from the beginning of SMB to data area */
        int data_length; /* the length of the variable length data area */
        /* Structure Size has already been checked to make sure it is 64 */
        int len = le16_to_cpu(shdr->StructureSize);

        /*
         * StructureSize2, ie length of fixed parameter area has already
         * been checked to make sure it is the correct length.
         */
        len += le16_to_cpu(pdu->StructureSize2);

        if (has_smb2_data_area[le16_to_cpu(shdr->Command)] == false)
                goto calc_size_exit;

        smb2_get_data_area_len(&offset, &data_length, shdr);
        cifs_dbg(FYI, "SMB2 data length %d offset %d\n", data_length, offset);

        if (data_length > 0) {
                /*
                 * Check to make sure that data area begins after fixed area,
                 * Note that last byte of the fixed area is part of data area
                 * for some commands, typically those with odd StructureSize,
                 * so we must add one to the calculation.
                 */
                if (offset + 1 < len) {
                        cifs_dbg(VFS, "data area offset %d overlaps SMB2 header %d\n",
                                 offset + 1, len);
                        data_length = 0;
                } else {
                        len = offset + data_length;
                }
        }
calc_size_exit:
        cifs_dbg(FYI, "SMB2 len %d\n", len);
        return len;
}

/* Note: caller must free return buffer */
__le16 *
cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
{
        int len;
        const char *start_of_path;
        __le16 *to;
        int map_type;

        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
                map_type = SFM_MAP_UNI_RSVD;
        else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
                map_type = SFU_MAP_UNI_RSVD;
        else
                map_type = NO_MAP_UNI_RSVD;

        /* Windows doesn't allow paths beginning with \ */
        if (from[0] == '\\')
                start_of_path = from + 1;

        /* SMB311 POSIX extensions paths do not include leading slash */
        else if (cifs_sb_master_tlink(cifs_sb) &&
                 cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
                 (from[0] == '/')) {
                start_of_path = from + 1;
        } else
                start_of_path = from;

        to = cifs_strndup_to_utf16(start_of_path, PATH_MAX, &len,
                                   cifs_sb->local_nls, map_type);
        return to;
}

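/*
 * Translate the cached oplock/lease state kept in the inode into the
 * SMB2 lease state flags (read/handle/write caching) used on the wire.
 */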
__le32
smb2_get_lease_state(struct cifsInodeInfo *cinode)
{
        __le32 lease = 0;

        if (CIFS_CACHE_WRITE(cinode))
                lease |= SMB2_LEASE_WRITE_CACHING;
        if (CIFS_CACHE_HANDLE(cinode))
                lease |= SMB2_LEASE_HANDLE_CACHING;
        if (CIFS_CACHE_READ(cinode))
                lease |= SMB2_LEASE_READ_CACHING;
        return lease;
}

struct smb2_lease_break_work {
        struct work_struct lease_break;
        struct tcon_link *tlink;
        __u8 lease_key[16];
        __le32 lease_state;
};

static void
cifs_ses_oplock_break(struct work_struct *work)
{
        struct smb2_lease_break_work *lw = container_of(work,
                                struct smb2_lease_break_work, lease_break);
        int rc = 0;

        rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
                              lw->lease_state);

        cifs_dbg(FYI, "Lease release rc %d\n", rc);
        cifs_put_tlink(lw->tlink);
        kfree(lw);
}

static void
smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
                              __le32 new_lease_state)
{
        struct smb2_lease_break_work *lw;

        lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
        if (!lw) {
                cifs_put_tlink(tlink);
                return;
        }

        INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
        lw->tlink = tlink;
        lw->lease_state = new_lease_state;
        memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
        queue_work(cifsiod_wq, &lw->lease_break);
}

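/*
 * Walk the open files of @tcon looking for one whose lease key matches the
 * lease break in @rsp.  On a match, record the new lease state and epoch on
 * the cifsFileInfo and queue the oplock break worker.  Returns true if a
 * matching open file was found.  Caller holds tcon->open_file_lock.
 */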
static bool
smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
{
        __u8 lease_state;
        struct list_head *tmp;
        struct cifsFileInfo *cfile;
        struct cifsInodeInfo *cinode;
        int ack_req = le32_to_cpu(rsp->Flags &
                                  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);

        lease_state = le32_to_cpu(rsp->NewLeaseState);

        list_for_each(tmp, &tcon->openFileList) {
                cfile = list_entry(tmp, struct cifsFileInfo, tlist);
                cinode = CIFS_I(d_inode(cfile->dentry));

                if (memcmp(cinode->lease_key, rsp->LeaseKey,
                           SMB2_LEASE_KEY_SIZE))
                        continue;

                cifs_dbg(FYI, "found in the open list\n");
                cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
                         lease_state);

                if (ack_req)
                        cfile->oplock_break_cancelled = false;
                else
                        cfile->oplock_break_cancelled = true;

                set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);

                cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
                cfile->oplock_level = lease_state;

                cifs_queue_oplock_break(cfile);
                return true;
        }

        return false;
}

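/*
 * Look through the pending opens of @tcon for entries whose lease key
 * matches the lease break in @rsp, updating their oplock state.  Returns
 * the first matching pending open if the break requires an ack (so the
 * caller can queue the lease break work), NULL otherwise.
 */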
static struct cifs_pending_open *
smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
                                  struct smb2_lease_break *rsp)
{
        __u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
        int ack_req = le32_to_cpu(rsp->Flags &
                                  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
        struct cifs_pending_open *open;
        struct cifs_pending_open *found = NULL;

        list_for_each_entry(open, &tcon->pending_opens, olist) {
                if (memcmp(open->lease_key, rsp->LeaseKey,
                           SMB2_LEASE_KEY_SIZE))
                        continue;

                if (!found && ack_req) {
                        found = open;
                }

                cifs_dbg(FYI, "found in the pending open list\n");
                cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
                         lease_state);

                open->oplock = lease_state;
        }

        return found;
}

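/*
 * Match an incoming lease break against every tcon on every session of
 * every connection: open files, pending opens and the cached root handle
 * are all checked.  Returns true once the break has been dispatched to a
 * handler, false if no matching lease was found.
 */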
static bool
smb2_is_valid_lease_break(char *buffer)
{
        struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
        struct list_head *tmp, *tmp1, *tmp2;
        struct TCP_Server_Info *server;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifs_pending_open *open;

        cifs_dbg(FYI, "Checking for lease break\n");

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &cifs_tcp_ses_list) {
                server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);

                list_for_each(tmp1, &server->smb_ses_list) {
                        ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);

                        list_for_each(tmp2, &ses->tcon_list) {
                                tcon = list_entry(tmp2, struct cifs_tcon,
                                                  tcon_list);
                                spin_lock(&tcon->open_file_lock);
                                cifs_stats_inc(
                                    &tcon->stats.cifs_stats.num_oplock_brks);
                                if (smb2_tcon_has_lease(tcon, rsp)) {
                                        spin_unlock(&tcon->open_file_lock);
                                        spin_unlock(&cifs_tcp_ses_lock);
                                        return true;
                                }
                                open = smb2_tcon_find_pending_open_lease(tcon,
                                                                         rsp);
                                if (open) {
                                        __u8 lease_key[SMB2_LEASE_KEY_SIZE];
                                        struct tcon_link *tlink;

                                        tlink = cifs_get_tlink(open->tlink);
                                        memcpy(lease_key, open->lease_key,
                                               SMB2_LEASE_KEY_SIZE);
                                        spin_unlock(&tcon->open_file_lock);
                                        spin_unlock(&cifs_tcp_ses_lock);
                                        smb2_queue_pending_open_break(tlink,
                                                                      lease_key,
                                                                      rsp->NewLeaseState);
                                        return true;
                                }
                                spin_unlock(&tcon->open_file_lock);

                                if (tcon->crfid.is_valid &&
                                    !memcmp(rsp->LeaseKey,
                                            tcon->crfid.fid->lease_key,
                                            SMB2_LEASE_KEY_SIZE)) {
                                        INIT_WORK(&tcon->crfid.lease_break,
                                                  smb2_cached_lease_break);
                                        queue_work(cifsiod_wq,
                                                   &tcon->crfid.lease_break);
                                        spin_unlock(&cifs_tcp_ses_lock);
                                        return true;
                                }
                        }
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
        return false;
}

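/*
 * Check whether @buffer is a server-initiated oplock (or SMB2.1 lease)
 * break.  For an oplock break, locate the open file it refers to by
 * persistent/volatile file id, record the new oplock level and queue the
 * oplock break worker.  Returns true if the break was dispatched.
 */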
bool
smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
{
        struct smb2_oplock_break *rsp = (struct smb2_oplock_break *)buffer;
        struct list_head *tmp, *tmp1, *tmp2;
        struct cifs_ses *ses;
        struct cifs_tcon *tcon;
        struct cifsInodeInfo *cinode;
        struct cifsFileInfo *cfile;

        cifs_dbg(FYI, "Checking for oplock break\n");

        if (rsp->sync_hdr.Command != SMB2_OPLOCK_BREAK)
                return false;

        if (rsp->StructureSize !=
                                smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
                if (le16_to_cpu(rsp->StructureSize) == 44)
                        return smb2_is_valid_lease_break(buffer);
                else
                        return false;
        }

        cifs_dbg(FYI, "oplock level 0x%x\n", rsp->OplockLevel);

        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &server->smb_ses_list) {
                ses = list_entry(tmp, struct cifs_ses, smb_ses_list);

                list_for_each(tmp1, &ses->tcon_list) {
                        tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);

                        spin_lock(&tcon->open_file_lock);
                        list_for_each(tmp2, &tcon->openFileList) {
                                cfile = list_entry(tmp2, struct cifsFileInfo,
                                                   tlist);
                                if (rsp->PersistentFid !=
                                    cfile->fid.persistent_fid ||
                                    rsp->VolatileFid !=
                                    cfile->fid.volatile_fid)
                                        continue;

                                cifs_dbg(FYI, "file id match, oplock break\n");
                                cifs_stats_inc(
                                    &tcon->stats.cifs_stats.num_oplock_brks);
                                cinode = CIFS_I(d_inode(cfile->dentry));
                                spin_lock(&cfile->file_info_lock);
                                if (!CIFS_CACHE_WRITE(cinode) &&
                                    rsp->OplockLevel == SMB2_OPLOCK_LEVEL_NONE)
                                        cfile->oplock_break_cancelled = true;
                                else
                                        cfile->oplock_break_cancelled = false;

                                set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
                                        &cinode->flags);

                                cfile->oplock_epoch = 0;
                                cfile->oplock_level = rsp->OplockLevel;

                                spin_unlock(&cfile->file_info_lock);

                                cifs_queue_oplock_break(cfile);

                                spin_unlock(&tcon->open_file_lock);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return true;
                        }
                        spin_unlock(&tcon->open_file_lock);
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
        cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
        return false;
}

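/*
 * Work handler queued by __smb2_handle_cancelled_cmd(): send the SMB2_close
 * for a handle whose open or close was interrupted, then drop the extra
 * tcon reference the caller took before queueing the work.
 */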
void
smb2_cancelled_close_fid(struct work_struct *work)
{
        struct close_cancelled_open *cancelled = container_of(work,
                                        struct close_cancelled_open, work);
        struct cifs_tcon *tcon = cancelled->tcon;
        int rc;

        if (cancelled->mid)
                cifs_tcon_dbg(VFS, "Close unmatched open for MID:%llx\n",
                              cancelled->mid);
        else
                cifs_tcon_dbg(VFS, "Close interrupted close\n");

        rc = SMB2_close(0, tcon, cancelled->fid.persistent_fid,
                        cancelled->fid.volatile_fid);
        if (rc)
                cifs_tcon_dbg(VFS, "Close cancelled mid failed rc:%d\n", rc);

        cifs_put_tcon(tcon);
        kfree(cancelled);
}

/*
 * Caller should already hold an extra reference to @tcon.
 * This function queues work to close a handle and so prevent a leak
 * on the server.
 * We handle two cases: an open that was interrupted after we sent the
 * SMB2_CREATE to the server but before we processed the reply, and a
 * close that was interrupted before we sent the SMB2_CLOSE to the server.
 */
static int
__smb2_handle_cancelled_cmd(struct cifs_tcon *tcon, __u16 cmd, __u64 mid,
                            __u64 persistent_fid, __u64 volatile_fid)
{
        struct close_cancelled_open *cancelled;

        cancelled = kzalloc(sizeof(*cancelled), GFP_ATOMIC);
        if (!cancelled)
                return -ENOMEM;

        cancelled->fid.persistent_fid = persistent_fid;
        cancelled->fid.volatile_fid = volatile_fid;
        cancelled->tcon = tcon;
        cancelled->cmd = cmd;
        cancelled->mid = mid;
        INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
        WARN_ON(queue_work(cifsiod_wq, &cancelled->work) == false);

        return 0;
}

int
smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
                            __u64 volatile_fid)
{
        int rc;

        cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
        spin_lock(&cifs_tcp_ses_lock);
        if (tcon->tc_count <= 0) {
                struct TCP_Server_Info *server = NULL;

                WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
                spin_unlock(&cifs_tcp_ses_lock);

                if (tcon->ses)
                        server = tcon->ses->server;

                cifs_server_dbg(FYI, "tid=%u: tcon is closing, skipping async close retry of fid %llu %llu\n",
                                tcon->tid, persistent_fid, volatile_fid);

                return 0;
        }
        tcon->tc_count++;
        spin_unlock(&cifs_tcp_ses_lock);

        rc = __smb2_handle_cancelled_cmd(tcon, SMB2_CLOSE_HE, 0,
                                         persistent_fid, volatile_fid);
        if (rc)
                cifs_put_tcon(tcon);

        return rc;
}

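/*
 * Called for a response whose mid was cancelled locally.  If it was a
 * successful SMB2_CREATE, queue work to close the now-orphaned handle on
 * the server so it is not leaked.
 */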
int
smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
{
        struct smb2_sync_hdr *sync_hdr = (struct smb2_sync_hdr *)buffer;
        struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
        struct cifs_tcon *tcon;
        int rc;

        if (sync_hdr->Command != SMB2_CREATE ||
            sync_hdr->Status != STATUS_SUCCESS)
                return 0;

        tcon = smb2_find_smb_tcon(server, sync_hdr->SessionId,
                                  sync_hdr->TreeId);
        if (!tcon)
                return -ENOENT;

        rc = __smb2_handle_cancelled_cmd(tcon,
                                         le16_to_cpu(sync_hdr->Command),
                                         le64_to_cpu(sync_hdr->MessageId),
                                         rsp->PersistentFileId,
                                         rsp->VolatileFileId);
        if (rc)
                cifs_put_tcon(tcon);

        return rc;
}

/**
 * smb311_update_preauth_hash - update @ses hash with the packet data in @iov
 *
 * Assumes @iov does not contain the rfc1002 length and iov[0] has the
 * SMB2 header.
 */
int
smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
{
        int i, rc;
        struct sdesc *d;
        struct smb2_sync_hdr *hdr;
        struct TCP_Server_Info *server = cifs_ses_server(ses);

        hdr = (struct smb2_sync_hdr *)iov[0].iov_base;
        /* neg prot are always taken */
        if (hdr->Command == SMB2_NEGOTIATE)
                goto ok;

        /*
         * If we process a command which wasn't a negprot it means the
         * neg prot was already done, so the server dialect was set
         * and we can test it. Preauth requires 3.1.1 for now.
         */
        if (server->dialect != SMB311_PROT_ID)
                return 0;

        if (hdr->Command != SMB2_SESSION_SETUP)
                return 0;

        /* skip last sess setup response */
        if ((hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
            && (hdr->Status == NT_STATUS_OK
                || (hdr->Status !=
                    cpu_to_le32(NT_STATUS_MORE_PROCESSING_REQUIRED))))
                return 0;

ok:
        rc = smb311_crypto_shash_allocate(server);
        if (rc)
                return rc;

        d = server->secmech.sdescsha512;
        rc = crypto_shash_init(&d->shash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not init sha512 shash\n", __func__);
                return rc;
        }

        rc = crypto_shash_update(&d->shash, ses->preauth_sha_hash,
                                 SMB2_PREAUTH_HASH_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not update sha512 shash\n", __func__);
                return rc;
        }

        for (i = 0; i < nvec; i++) {
                rc = crypto_shash_update(&d->shash,
                                         iov[i].iov_base, iov[i].iov_len);
                if (rc) {
                        cifs_dbg(VFS, "%s: Could not update sha512 shash\n",
                                 __func__);
                        return rc;
                }
        }

        rc = crypto_shash_final(&d->shash, ses->preauth_sha_hash);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not finalize sha512 shash\n",
                         __func__);
                return rc;
        }

        return 0;
}