blob: 2bbb8d38d2bf5f6eeb87a5771aeb92683d25543f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/net/sunrpc/xdr.c
3 *
4 * Generic XDR support.
5 *
6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
7 */
8
Chuck Levera246b012005-08-11 16:25:23 -04009#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090010#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/pagemap.h>
15#include <linux/errno.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/sunrpc/xdr.h>
17#include <linux/sunrpc/msg_prot.h>
Trond Myklebust9d96acb2018-09-13 12:22:04 -040018#include <linux/bvec.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019
20/*
21 * XDR functions for basic NFS types
22 */
Alexey Dobriyand8ed0292006-09-26 22:29:38 -070023__be32 *
24xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
Linus Torvalds1da177e2005-04-16 15:20:36 -070025{
26 unsigned int quadlen = XDR_QUADLEN(obj->len);
27
28 p[quadlen] = 0; /* zero trailing bytes */
Benny Halevy9f162d22009-08-14 17:18:44 +030029 *p++ = cpu_to_be32(obj->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -070030 memcpy(p, obj->data, obj->len);
31 return p + XDR_QUADLEN(obj->len);
32}
Trond Myklebust468039e2008-12-23 15:21:31 -050033EXPORT_SYMBOL_GPL(xdr_encode_netobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
Alexey Dobriyand8ed0292006-09-26 22:29:38 -070035__be32 *
36xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
Linus Torvalds1da177e2005-04-16 15:20:36 -070037{
38 unsigned int len;
39
Benny Halevy98866b52009-08-14 17:18:49 +030040 if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
Linus Torvalds1da177e2005-04-16 15:20:36 -070041 return NULL;
42 obj->len = len;
43 obj->data = (u8 *) p;
44 return p + XDR_QUADLEN(len);
45}
Trond Myklebust468039e2008-12-23 15:21:31 -050046EXPORT_SYMBOL_GPL(xdr_decode_netobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
48/**
49 * xdr_encode_opaque_fixed - Encode fixed length opaque data
Pavel Pisa4dc3b162005-05-01 08:59:25 -070050 * @p: pointer to current position in XDR buffer.
51 * @ptr: pointer to data to encode (or NULL)
52 * @nbytes: size of data.
Linus Torvalds1da177e2005-04-16 15:20:36 -070053 *
54 * Copy the array of data of length nbytes at ptr to the XDR buffer
55 * at position p, then align to the next 32-bit boundary by padding
56 * with zero bytes (see RFC1832).
57 * Note: if ptr is NULL, only the padding is performed.
58 *
59 * Returns the updated current XDR buffer position
60 *
61 */
Alexey Dobriyand8ed0292006-09-26 22:29:38 -070062__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -070063{
64 if (likely(nbytes != 0)) {
65 unsigned int quadlen = XDR_QUADLEN(nbytes);
66 unsigned int padding = (quadlen << 2) - nbytes;
67
68 if (ptr != NULL)
69 memcpy(p, ptr, nbytes);
70 if (padding != 0)
71 memset((char *)p + nbytes, 0, padding);
72 p += quadlen;
73 }
74 return p;
75}
Trond Myklebust468039e2008-12-23 15:21:31 -050076EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
78/**
79 * xdr_encode_opaque - Encode variable length opaque data
Pavel Pisa4dc3b162005-05-01 08:59:25 -070080 * @p: pointer to current position in XDR buffer.
81 * @ptr: pointer to data to encode (or NULL)
82 * @nbytes: size of data.
Linus Torvalds1da177e2005-04-16 15:20:36 -070083 *
84 * Returns the updated current XDR buffer position
85 */
Alexey Dobriyand8ed0292006-09-26 22:29:38 -070086__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -070087{
Benny Halevy9f162d22009-08-14 17:18:44 +030088 *p++ = cpu_to_be32(nbytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089 return xdr_encode_opaque_fixed(p, ptr, nbytes);
90}
Trond Myklebust468039e2008-12-23 15:21:31 -050091EXPORT_SYMBOL_GPL(xdr_encode_opaque);
Linus Torvalds1da177e2005-04-16 15:20:36 -070092
Alexey Dobriyand8ed0292006-09-26 22:29:38 -070093__be32 *
94xdr_encode_string(__be32 *p, const char *string)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
96 return xdr_encode_array(p, string, strlen(string));
97}
Trond Myklebust468039e2008-12-23 15:21:31 -050098EXPORT_SYMBOL_GPL(xdr_encode_string);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099
Alexey Dobriyand8ed0292006-09-26 22:29:38 -0700100__be32 *
Chuck Levere5cff482007-11-01 16:56:47 -0400101xdr_decode_string_inplace(__be32 *p, char **sp,
102 unsigned int *lenp, unsigned int maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103{
Chuck Levere5cff482007-11-01 16:56:47 -0400104 u32 len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
Benny Halevy98866b52009-08-14 17:18:49 +0300106 len = be32_to_cpu(*p++);
Chuck Levere5cff482007-11-01 16:56:47 -0400107 if (len > maxlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108 return NULL;
109 *lenp = len;
110 *sp = (char *) p;
111 return p + XDR_QUADLEN(len);
112}
Trond Myklebust468039e2008-12-23 15:21:31 -0500113EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114
Chuck Leverb4687da2010-09-21 16:55:48 -0400115/**
116 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
117 * @buf: XDR buffer where string resides
118 * @len: length of string, in bytes
119 *
120 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	/*
	 * NOTE(review): the terminator is written at page_base + len
	 * within pages[0] only, which assumes the whole string plus its
	 * NUL fit inside the first page — confirm against callers.
	 */
	kaddr = kmap_atomic(buf->pages[0]);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
Chuck Leverb4687da2010-09-21 16:55:48 -0400131
Trond Myklebust9d96acb2018-09-13 12:22:04 -0400132size_t
133xdr_buf_pagecount(struct xdr_buf *buf)
134{
135 if (!buf->page_len)
136 return 0;
137 return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
138}
139
140int
141xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
142{
143 size_t i, n = xdr_buf_pagecount(buf);
144
145 if (n != 0 && buf->bvec == NULL) {
146 buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
147 if (!buf->bvec)
148 return -ENOMEM;
149 for (i = 0; i < n; i++) {
150 buf->bvec[i].bv_page = buf->pages[i];
151 buf->bvec[i].bv_len = PAGE_SIZE;
152 buf->bvec[i].bv_offset = 0;
153 }
154 }
155 return 0;
156}
157
/* Release the bio_vec array allocated by xdr_alloc_bvec(). */
void
xdr_free_bvec(struct xdr_buf *buf)
{
	kfree(buf->bvec);
	/* Clear the pointer so a later xdr_alloc_bvec() can reallocate. */
	buf->bvec = NULL;
}
164
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
167 struct page **pages, unsigned int base, unsigned int len)
168{
169 struct kvec *head = xdr->head;
170 struct kvec *tail = xdr->tail;
171 char *buf = (char *)head->iov_base;
172 unsigned int buflen = head->iov_len;
173
174 head->iov_len = offset;
175
176 xdr->pages = pages;
177 xdr->page_base = base;
178 xdr->page_len = len;
179
180 tail->iov_base = buf + offset;
181 tail->iov_len = buflen - offset;
182
183 xdr->buflen += len;
184}
Trond Myklebust468039e2008-12-23 15:21:31 -0500185EXPORT_SYMBOL_GPL(xdr_inline_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187/*
188 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
Ben Hutchings2c530402012-07-10 10:55:09 +0000189 */
190
191/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 * _shift_data_right_pages
193 * @pages: vector of pages containing both the source and dest memory area.
194 * @pgto_base: page vector address of destination
195 * @pgfrom_base: page vector address of source
196 * @len: number of bytes to copy
197 *
198 * Note: the addresses pgto_base and pgfrom_base are both calculated in
199 * the same way:
200 * if a memory area starts at byte 'base' in page 'pages[i]',
Kirill A. Shutemovea1754a2016-04-01 15:29:48 +0300201 * then its address is given as (i << PAGE_SHIFT) + base
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 * Also note: pgfrom_base must be < pgto_base, but the memory areas
203 * they point to may overlap.
204 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	/* Only right shifts are supported (destination above source). */
	BUG_ON(pgto_base <= pgfrom_base);

	/* Copy backwards from the end so overlapping areas are safe. */
	pgto_base += len;
	pgfrom_base += len;

	pgto = pages + (pgto_base >> PAGE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_SHIFT);

	pgto_base &= ~PAGE_MASK;
	pgfrom_base &= ~PAGE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_SIZE;
			pgfrom--;
		}

		/* Copy no more than remains in either current page. */
		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto);
		if (*pgto != *pgfrom) {
			vfrom = kmap_atomic(*pgfrom);
			memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
			kunmap_atomic(vfrom);
		} else
			/* Same page: regions may overlap, use memmove. */
			memmove(vto + pgto_base, vto + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vto);

	} while ((len -= copy) != 0);
}
255
Ben Hutchings2c530402012-07-10 10:55:09 +0000256/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 * _copy_to_pages
258 * @pages: array of pages
259 * @pgbase: page vector address of destination
260 * @p: pointer to source data
261 * @len: length
262 *
263 * Copies data from an arbitrary memory location into an array of pages
264 * The copy is assumed to be non-overlapping.
265 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	/* Locate the page and in-page offset of the destination. */
	pgto = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	for (;;) {
		/* Copy at most up to the end of the current page. */
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto);

		len -= copy;
		if (len == 0)
			break;

		/* Advance; on a page boundary flush and move to the next page. */
		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	/* Flush the (possibly partial) final page. */
	flush_dcache_page(*pgto);
}
299
Ben Hutchings2c530402012-07-10 10:55:09 +0000300/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301 * _copy_from_pages
302 * @p: pointer to destination
303 * @pages: array of pages
304 * @pgbase: offset of source data
305 * @len: length
306 *
307 * Copies data into an arbitrary memory location from an array of pages
308 * The copy is assumed to be non-overlapping.
309 */
void
_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
{
	struct page **pgfrom;
	char *vfrom;
	size_t copy;

	/* Locate the page and in-page offset of the source. */
	pgfrom = pages + (pgbase >> PAGE_SHIFT);
	pgbase &= ~PAGE_MASK;

	do {
		/* Copy at most up to the end of the current page. */
		copy = PAGE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vfrom = kmap_atomic(*pgfrom);
		memcpy(p, vfrom + pgbase, copy);
		kunmap_atomic(vfrom);

		/* Step to the next page when the offset wraps. */
		pgbase += copy;
		if (pgbase == PAGE_SIZE) {
			pgbase = 0;
			pgfrom++;
		}
		p += copy;

	} while ((len -= copy) != 0);
}
EXPORT_SYMBOL_GPL(_copy_from_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339
Ben Hutchings2c530402012-07-10 10:55:09 +0000340/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341 * xdr_shrink_bufhead
342 * @buf: xdr_buf
343 * @len: bytes to remove from buf->head[0]
344 *
YOSHIFUJI Hideakicca51722007-02-09 15:38:13 -0800345 * Shrinks XDR buffer's header kvec buf->head[0] by
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346 * 'len' bytes. The extra data is not lost, but is instead
347 * moved into the inlined pages and/or the tail.
348 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;

	/* Cannot remove more than the head actually holds. */
	WARN_ON_ONCE(len > head->iov_len);
	if (len > head->iov_len)
		len = head->iov_len;

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		/* Make room at the front of the tail for incoming data. */
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		/* Shift existing page data right to make room... */
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		/* ...then move the tail end of the head into the pages. */
		copy = len;
		if (len > pglen)
			copy = pglen;
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
415
Ben Hutchings2c530402012-07-10 10:55:09 +0000416/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 * xdr_shrink_pagelen
418 * @buf: xdr_buf
419 * @len: bytes to remove from buf->pages
420 *
YOSHIFUJI Hideakicca51722007-02-09 15:38:13 -0800421 * Shrinks XDR buffer's page array buf->pages by
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 * 'len' bytes. The extra data is not lost, but is instead
423 * moved into the tail.
424 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	/* Cannot remove more than the pages actually hold. */
	BUG_ON (len > pglen);

	/* Space reserved for the tail within the overall buffer. */
	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		/* Grow the tail by up to @len bytes of unused space. */
		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			/* Push existing tail data back to make room. */
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
463
/* Shrink buf->head by @len bytes, moving the data into pages/tail. */
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470
471/**
Trond Myklebust4517d522012-06-21 17:14:46 -0400472 * xdr_stream_pos - Return the current offset from the start of the xdr_stream
473 * @xdr: pointer to struct xdr_stream
474 */
475unsigned int xdr_stream_pos(const struct xdr_stream *xdr)
476{
477 return (unsigned int)(XDR_QUADLEN(xdr->buf->len) - xdr->nwords) << 2;
478}
479EXPORT_SYMBOL_GPL(xdr_stream_pos);
480
481/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
483 * @xdr: pointer to xdr_stream struct
484 * @buf: pointer to XDR buffer in which to encode data
485 * @p: current pointer inside XDR buffer
486 *
487 * Note: at the moment the RPC client only passes the length of our
488 * scratch buffer in the xdr_buf's header kvec. Previously this
489 * meant we needed to call xdr_adjust_iovec() after encoding the
490 * data. With the new scheme, the xdr_stream manages the details
491 * of the buffer length, and takes care of adjusting the kvec
492 * length for us.
493 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	/* Space in the head not claimed by page data or the tail. */
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	/* No cross-page scratch area is in use yet. */
	xdr_set_scratch_buffer(xdr, NULL, 0);
	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	/* The caller may already have encoded past iov_len; catch up. */
	if (p != xdr->p && p != NULL) {
		size_t len;

		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518
519/**
J. Bruce Fields2825a7f2013-08-26 16:04:46 -0400520 * xdr_commit_encode - Ensure all data is written to buffer
521 * @xdr: pointer to xdr_stream
522 *
523 * We handle encoding across page boundaries by giving the caller a
524 * temporary location to write to, then later copying the data into
525 * place; xdr_commit_encode does that copying.
526 *
527 * Normally the caller doesn't need to call this directly, as the
528 * following xdr_reserve_space will do it. But an explicit call may be
529 * required at the end of encoding, or any other time when the xdr_buf
530 * data might be read.
531 */
void xdr_commit_encode(struct xdr_stream *xdr)
{
	/* Non-zero only when the last reserve straddled a page boundary. */
	int shift = xdr->scratch.iov_len;
	void *page;

	if (shift == 0)
		return;
	page = page_address(*xdr->page_ptr);
	/* Move the boundary-crossing fragment back where it belongs... */
	memcpy(xdr->scratch.iov_base, page, shift);
	/* ...and close the resulting gap at the start of the new page. */
	memmove(page, page + shift, (void *)xdr->p - page);
	xdr->scratch.iov_len = 0;
}
EXPORT_SYMBOL_GPL(xdr_commit_encode);
545
Trond Myklebust22cb4382014-07-12 18:01:02 -0400546static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
547 size_t nbytes)
J. Bruce Fields2825a7f2013-08-26 16:04:46 -0400548{
549 static __be32 *p;
550 int space_left;
551 int frag1bytes, frag2bytes;
552
553 if (nbytes > PAGE_SIZE)
554 return NULL; /* Bigger buffers require special handling */
555 if (xdr->buf->len + nbytes > xdr->buf->buflen)
556 return NULL; /* Sorry, we're totally out of space */
557 frag1bytes = (xdr->end - xdr->p) << 2;
558 frag2bytes = nbytes - frag1bytes;
559 if (xdr->iov)
560 xdr->iov->iov_len += frag1bytes;
J. Bruce Fields05638dc2014-06-02 12:05:47 -0400561 else
J. Bruce Fields2825a7f2013-08-26 16:04:46 -0400562 xdr->buf->page_len += frag1bytes;
J. Bruce Fields05638dc2014-06-02 12:05:47 -0400563 xdr->page_ptr++;
J. Bruce Fields2825a7f2013-08-26 16:04:46 -0400564 xdr->iov = NULL;
565 /*
566 * If the last encode didn't end exactly on a page boundary, the
567 * next one will straddle boundaries. Encode into the next
568 * page, then copy it back later in xdr_commit_encode. We use
569 * the "scratch" iov to track any temporarily unused fragment of
570 * space at the end of the previous buffer:
571 */
572 xdr->scratch.iov_base = xdr->p;
573 xdr->scratch.iov_len = frag1bytes;
574 p = page_address(*xdr->page_ptr);
575 /*
576 * Note this is where the next encode will start after we've
577 * shifted this one back:
578 */
579 xdr->p = (void *)p + frag2bytes;
580 space_left = xdr->buf->buflen - xdr->buf->len;
581 xdr->end = (void *)p + min_t(int, space_left, PAGE_SIZE);
582 xdr->buf->page_len += frag2bytes;
583 xdr->buf->len += nbytes;
584 return p;
585}
586
587/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588 * xdr_reserve_space - Reserve buffer space for sending
589 * @xdr: pointer to xdr_stream
590 * @nbytes: number of bytes to reserve
591 *
592 * Checks that we have enough buffer space to encode 'nbytes' more
593 * bytes of data. If so, update the total xdr_buf length, and
594 * adjust the length of the current kvec.
595 */
Alexey Dobriyand8ed0292006-09-26 22:29:38 -0700596__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597{
Alexey Dobriyand8ed0292006-09-26 22:29:38 -0700598 __be32 *p = xdr->p;
599 __be32 *q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700600
J. Bruce Fields2825a7f2013-08-26 16:04:46 -0400601 xdr_commit_encode(xdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602 /* align nbytes on the next 32-bit boundary */
603 nbytes += 3;
604 nbytes &= ~3;
605 q = p + (nbytes >> 2);
606 if (unlikely(q > xdr->end || q < p))
J. Bruce Fields2825a7f2013-08-26 16:04:46 -0400607 return xdr_get_next_encode_buffer(xdr, nbytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608 xdr->p = q;
J. Bruce Fields2825a7f2013-08-26 16:04:46 -0400609 if (xdr->iov)
610 xdr->iov->iov_len += nbytes;
611 else
612 xdr->buf->page_len += nbytes;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613 xdr->buf->len += nbytes;
614 return p;
615}
Trond Myklebust468039e2008-12-23 15:21:31 -0500616EXPORT_SYMBOL_GPL(xdr_reserve_space);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617
618/**
J. Bruce Fields3e19ce72014-02-25 17:44:21 -0500619 * xdr_truncate_encode - truncate an encode buffer
620 * @xdr: pointer to xdr_stream
621 * @len: new length of buffer
622 *
623 * Truncates the xdr stream, so that xdr->buf->len == len,
624 * and xdr->p points at offset len from the start of the buffer, and
625 * head, tail, and page lengths are adjusted to correspond.
626 *
627 * If this means moving xdr->p to a different buffer, we assume that
628 * that the end pointer should be set to the end of the current page,
629 * except in the case of the head buffer when we assume the head
630 * buffer's current length represents the end of the available buffer.
631 *
632 * This is *not* safe to use on a buffer that already has inlined page
633 * cache pages (as in a zero-copy server read reply), except for the
634 * simple case of truncating from one position in the tail to another.
635 *
636 */
void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *head = buf->head;
	struct kvec *tail = buf->tail;
	int fraglen;
	int new;

	/* This is a truncate-only interface; growing is not supported. */
	if (len > buf->len) {
		WARN_ON_ONCE(1);
		return;
	}
	xdr_commit_encode(xdr);

	/* First trim whatever part of the excess lives in the tail. */
	fraglen = min_t(int, buf->len - len, tail->iov_len);
	tail->iov_len -= fraglen;
	buf->len -= fraglen;
	if (tail->iov_len) {
		/* Still inside the tail: just back up the write pointer. */
		xdr->p = tail->iov_base + tail->iov_len;
		WARN_ON_ONCE(!xdr->end);
		WARN_ON_ONCE(!xdr->iov);
		return;
	}
	WARN_ON_ONCE(fraglen);
	/* Then trim the page data. */
	fraglen = min_t(int, buf->len - len, buf->page_len);
	buf->page_len -= fraglen;
	buf->len -= fraglen;

	/* New end of page data, relative to the start of the page array. */
	new = buf->page_base + buf->page_len;

	xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);

	if (buf->page_len) {
		/* Still inside the pages: point at the new last page. */
		xdr->p = page_address(*xdr->page_ptr);
		xdr->end = (void *)xdr->p + PAGE_SIZE;
		xdr->p = (void *)xdr->p + (new % PAGE_SIZE);
		WARN_ON_ONCE(xdr->iov);
		return;
	}
	if (fraglen) {
		/* All page data gone; the head's end is the new limit. */
		xdr->end = head->iov_base + head->iov_len;
		xdr->page_ptr--;
	}
	/* (otherwise assume xdr->end is already set) */
	head->iov_len = len;
	buf->len = len;
	xdr->p = head->iov_base + head->iov_len;
	xdr->iov = buf->head;
}
EXPORT_SYMBOL(xdr_truncate_encode);
687
688/**
J. Bruce Fieldsdb3f58a2014-03-06 13:22:18 -0500689 * xdr_restrict_buflen - decrease available buffer space
690 * @xdr: pointer to xdr_stream
691 * @newbuflen: new maximum number of bytes available
692 *
693 * Adjust our idea of how much space is available in the buffer.
694 * If we've already used too much space in the buffer, returns -1.
695 * If the available space is already smaller than newbuflen, returns 0
696 * and does nothing. Otherwise, adjusts xdr->buf->buflen to newbuflen
697 * and ensures xdr->end is set at most offset newbuflen from the start
698 * of the buffer.
699 */
int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen)
{
	struct xdr_buf *buf = xdr->buf;
	/* Bytes still writable in the current kvec/page segment. */
	int left_in_this_buf = (void *)xdr->end - (void *)xdr->p;
	/* Buffer offset that xdr->end currently corresponds to. */
	int end_offset = buf->len + left_in_this_buf;

	/* Already used more than the new limit allows. */
	if (newbuflen < 0 || newbuflen < buf->len)
		return -1;
	/* Current limit is already tighter; nothing to do. */
	if (newbuflen > buf->buflen)
		return 0;
	/* Pull xdr->end back so it sits at most at offset newbuflen. */
	if (newbuflen < end_offset)
		xdr->end = (void *)xdr->end + newbuflen - end_offset;
	buf->buflen = newbuflen;
	return 0;
}
EXPORT_SYMBOL(xdr_restrict_buflen);
716
717/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
719 * @xdr: pointer to xdr_stream
720 * @pages: list of pages
721 * @base: offset of first byte
722 * @len: length of data in bytes
723 *
724 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	/* Subsequent encoding continues in the tail, at xdr->p. */
	iov->iov_base = (char *)xdr->p;
	iov->iov_len = 0;
	xdr->iov = iov;

	if (len & 3) {
		/* Pad the page data out to a full XDR word with zeroes. */
		unsigned int pad = 4 - (len & 3);

		BUG_ON(xdr->p >= xdr->end);
		/* The tail starts right after the pad bytes. */
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751
/* Point the decode stream at kvec @iov, clamping @len to the kvec size. */
static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
		unsigned int len)
{
	if (len > iov->iov_len)
		len = iov->iov_len;
	xdr->p = (__be32*)iov->iov_base;
	xdr->end = (__be32*)(iov->iov_base + len);
	xdr->iov = iov;
	/* Not decoding from the page array. */
	xdr->page_ptr = NULL;
}
762
/*
 * Point the decode stream at offset @base within the buffer's page data,
 * making at most @len bytes (clamped to one page) available for decoding.
 * Returns -EINVAL if @base lies beyond the page data.
 */
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	/* Clamp @len to what the page data actually holds past @base. */
	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	/* Convert the logical offset into page number + in-page offset. */
	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	/* The decode window never extends past the current page. */
	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
795
/* Advance the decode stream to the page after the current one. */
static void xdr_set_next_page(struct xdr_stream *xdr)
{
	unsigned int newbase;

	/* Logical offset of the next page, relative to page_base. */
	newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
	newbase -= xdr->buf->page_base;

	/* Past the end of the page data: fall through to the tail kvec. */
	if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
		xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
}
806
/*
 * Move decoding to the next buffer segment (head -> pages -> tail).
 * Returns true if more data is available at the new position.
 */
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		/* No page data at all: skip straight to the tail. */
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, xdr->nwords << 2);
	}
	return xdr->p != xdr->end;
}
817
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	/* No scratch buffer until xdr_set_scratch_buffer() is called */
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	/* Total 32-bit words available in the whole message */
	xdr->nwords = XDR_QUADLEN(buf->len);
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
	else
		/* Empty head and no pages: point at the (empty) head
		 * anyway so xdr->p/xdr->end stay valid */
		xdr_set_iov(xdr, buf->head, buf->len);
	/* Caller may pass a position already past the start of the head */
	if (p != NULL && p > xdr->p && xdr->end >= p) {
		xdr->nwords -= p - xdr->p;
		xdr->p = p;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700842
Benny Halevyf7da7a12011-05-19 14:16:47 -0400843/**
Chuck Lever7ecce752017-04-11 13:23:59 -0400844 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
Benny Halevyf7da7a12011-05-19 14:16:47 -0400845 * @xdr: pointer to xdr_stream struct
846 * @buf: pointer to XDR buffer from which to decode data
847 * @pages: list of pages to decode into
848 * @len: length in bytes of buffer in pages
849 */
850void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
851 struct page **pages, unsigned int len)
852{
853 memset(buf, 0, sizeof(*buf));
854 buf->pages = pages;
855 buf->page_len = len;
856 buf->buflen = len;
857 buf->len = len;
858 xdr_init_decode(xdr, buf, NULL);
859}
860EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
861
Trond Myklebust66502392011-01-08 17:45:38 -0500862static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
Trond Myklebustba8e4522010-10-19 19:58:49 -0400863{
Trond Myklebustbfeea1d2012-06-20 09:58:35 -0400864 unsigned int nwords = XDR_QUADLEN(nbytes);
Trond Myklebustba8e4522010-10-19 19:58:49 -0400865 __be32 *p = xdr->p;
Trond Myklebustbfeea1d2012-06-20 09:58:35 -0400866 __be32 *q = p + nwords;
Trond Myklebustba8e4522010-10-19 19:58:49 -0400867
Trond Myklebustbfeea1d2012-06-20 09:58:35 -0400868 if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p))
Trond Myklebustba8e4522010-10-19 19:58:49 -0400869 return NULL;
Trond Myklebust66502392011-01-08 17:45:38 -0500870 xdr->p = q;
Trond Myklebustbfeea1d2012-06-20 09:58:35 -0400871 xdr->nwords -= nwords;
Trond Myklebustba8e4522010-10-19 19:58:49 -0400872 return p;
873}
Trond Myklebustba8e4522010-10-19 19:58:49 -0400874
875/**
Trond Myklebust66502392011-01-08 17:45:38 -0500876 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
877 * @xdr: pointer to xdr_stream struct
878 * @buf: pointer to an empty buffer
879 * @buflen: size of 'buf'
880 *
881 * The scratch buffer is used when decoding from an array of pages.
882 * If an xdr_inline_decode() call spans across page boundaries, then
883 * we copy the data into the scratch buffer in order to allow linear
884 * access.
885 */
886void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
887{
888 xdr->scratch.iov_base = buf;
889 xdr->scratch.iov_len = buflen;
890}
891EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
892
/*
 * Decode @nbytes that straddle the boundary between the current decode
 * window and the next one (typically a page boundary).  The two pieces
 * are copied into the caller's scratch buffer so the caller can access
 * the result linearly.  Returns a pointer into the scratch buffer, or
 * NULL if the scratch buffer is too small or the stream runs out.
 */
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	char *cpdest = xdr->scratch.iov_base;
	/* Bytes left in the current window (all of them will be consumed) */
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	/* First copy the remainder of the current window ... */
	p = __xdr_inline_decode(xdr, cplen);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	/* ... then the rest of the request from the next window */
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
915
916/**
917 * xdr_inline_decode - Retrieve XDR data to decode
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918 * @xdr: pointer to xdr_stream struct
919 * @nbytes: number of bytes of data to decode
920 *
921 * Check if the input buffer is long enough to enable us to decode
922 * 'nbytes' more bytes of data starting at the current position.
923 * If so return the current pointer, then update the current
924 * pointer position.
925 */
Alexey Dobriyand8ed0292006-09-26 22:29:38 -0700926__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927{
Trond Myklebust66502392011-01-08 17:45:38 -0500928 __be32 *p;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929
Trond Myklebust66502392011-01-08 17:45:38 -0500930 if (nbytes == 0)
931 return xdr->p;
932 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933 return NULL;
Trond Myklebust66502392011-01-08 17:45:38 -0500934 p = __xdr_inline_decode(xdr, nbytes);
935 if (p != NULL)
936 return p;
937 return xdr_copy_to_scratch(xdr, nbytes);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938}
Trond Myklebust468039e2008-12-23 15:21:31 -0500939EXPORT_SYMBOL_GPL(xdr_inline_decode);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940
/*
 * xdr_align_pages - make the page data start at the current position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data the caller expects
 *
 * Shrinks the head so that it ends exactly at the current decode
 * position, then truncates the page data to at most @len bytes, moving
 * any excess into the tail (via xdr_shrink_pagelen).  Returns the number
 * of bytes of page data available, clamped to both @len and the number
 * of words remaining in the stream.
 */
static unsigned int xdr_align_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords = XDR_QUADLEN(len);
	unsigned int cur = xdr_stream_pos(xdr);

	if (xdr->nwords == 0)
		return 0;
	/* Realign pages to current pointer position */
	iov = buf->head;
	if (iov->iov_len > cur) {
		xdr_shrink_bufhead(buf, iov->iov_len - cur);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}

	/* Never report more data than remains in the stream */
	if (nwords > xdr->nwords) {
		nwords = xdr->nwords;
		len = nwords << 2;
	}
	if (buf->page_len <= len)
		/* All existing page data fits within the request */
		len = buf->page_len;
	else if (nwords < xdr->nwords) {
		/* Truncate page data and move it into the tail */
		xdr_shrink_pagelen(buf, buf->page_len - len);
		xdr->nwords = XDR_QUADLEN(buf->len - cur);
	}
	return len;
}
Trond Myklebustbd00f842012-06-26 13:50:43 -0400970
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 *
 * Returns the number of XDR encoded bytes now contained in the pages
 */
unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	unsigned int nwords;
	unsigned int end;
	unsigned int padding;

	len = xdr_align_pages(xdr, len);
	if (len == 0)
		return 0;
	nwords = XDR_QUADLEN(len);
	/* XDR pads page data to a word multiple; the pad bytes land at
	 * the start of the tail */
	padding = (nwords << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length. */
	end = ((xdr->nwords - nwords) << 2) + padding;
	if (end > iov->iov_len)
		end = iov->iov_len;

	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
	/* We now decode from a kvec again, not from pages */
	xdr->page_ptr = NULL;
	xdr->nwords = XDR_QUADLEN(end - padding);
	return len;
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	unsigned int aligned = xdr_align_pages(xdr, len);

	/* Point the stream at the start of the (realigned) page data */
	if (aligned != 0)
		xdr_set_page_base(xdr, 0, aligned);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
Trond Myklebust8b23ea72006-06-09 09:34:21 -04001034
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};
1036
1037void
1038xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
1039{
1040 buf->head[0] = *iov;
1041 buf->tail[0] = empty_iov;
1042 buf->page_len = 0;
1043 buf->buflen = buf->len = iov->iov_len;
1044}
Trond Myklebust468039e2008-12-23 15:21:31 -05001045EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046
/**
 * xdr_buf_subsegment - set subbuf to a portion of buf
 * @buf: an xdr buffer
 * @subbuf: the result buffer
 * @base: beginning of range in bytes
 * @len: length of range in bytes
 *
 * sets @subbuf to an xdr buffer representing the portion of @buf of
 * length @len starting at offset @base.
 *
 * @buf and @subbuf may be pointers to the same struct xdr_buf.
 *
 * Returns -1 if base of length are out of bounds.
 */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	/* Head: take the overlap of [base, base+len) with head[0].
	 * After each segment, @base is reduced to an offset into the
	 * next segment and @len to the bytes still unaccounted for. */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		base -= buf->head[0].iov_len;
		/* iov_base intentionally left as-is: len 0 means unused */
		subbuf->head[0].iov_len = 0;
	}

	/* Pages: @base is now relative to the start of the page data */
	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		subbuf->page_base = base & ~PAGE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	/* Tail: whatever remains of the requested range */
	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		base -= buf->tail[0].iov_len;
		subbuf->tail[0].iov_len = 0;
	}

	/* Nonzero leftovers mean the range fell outside @buf */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001105
Jeff Layton4c190e22013-02-06 08:28:55 -05001106/**
1107 * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
1108 * @buf: buf to be trimmed
1109 * @len: number of bytes to reduce "buf" by
1110 *
1111 * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
1112 * that it's possible that we'll trim less than that amount if the xdr_buf is
1113 * too small, or if (for instance) it's all in the head and the parser has
1114 * already read too far into it.
1115 */
1116void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
1117{
1118 size_t cur;
1119 unsigned int trim = len;
1120
1121 if (buf->tail[0].iov_len) {
1122 cur = min_t(size_t, buf->tail[0].iov_len, trim);
1123 buf->tail[0].iov_len -= cur;
1124 trim -= cur;
1125 if (!trim)
1126 goto fix_len;
1127 }
1128
1129 if (buf->page_len) {
1130 cur = min_t(unsigned int, buf->page_len, trim);
1131 buf->page_len -= cur;
1132 trim -= cur;
1133 if (!trim)
1134 goto fix_len;
1135 }
1136
1137 if (buf->head[0].iov_len) {
1138 cur = min_t(size_t, buf->head[0].iov_len, trim);
1139 buf->head[0].iov_len -= cur;
1140 trim -= cur;
1141 }
1142fix_len:
1143 buf->len -= (len - trim);
1144}
1145EXPORT_SYMBOL_GPL(xdr_buf_trim);
1146
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001147static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148{
Trond Myklebust1e789572006-08-31 15:09:19 -04001149 unsigned int this_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001151 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1152 memcpy(obj, subbuf->head[0].iov_base, this_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 len -= this_len;
1154 obj += this_len;
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001155 this_len = min_t(unsigned int, len, subbuf->page_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 if (this_len)
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001157 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158 len -= this_len;
1159 obj += this_len;
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001160 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1161 memcpy(obj, subbuf->tail[0].iov_base, this_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162}
1163
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001164/* obj is assumed to point to allocated memory of size at least len: */
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001165int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001166{
1167 struct xdr_buf subbuf;
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001168 int status;
1169
1170 status = xdr_buf_subsegment(buf, &subbuf, base, len);
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001171 if (status != 0)
1172 return status;
1173 __read_bytes_from_xdr_buf(&subbuf, obj, len);
1174 return 0;
1175}
Trond Myklebust468039e2008-12-23 15:21:31 -05001176EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001177
1178static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
1179{
1180 unsigned int this_len;
1181
1182 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
1183 memcpy(subbuf->head[0].iov_base, obj, this_len);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001184 len -= this_len;
1185 obj += this_len;
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001186 this_len = min_t(unsigned int, len, subbuf->page_len);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001187 if (this_len)
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001188 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001189 len -= this_len;
1190 obj += this_len;
Trond Myklebust4e3e43a2006-10-17 13:47:24 -04001191 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
1192 memcpy(subbuf->tail[0].iov_base, obj, this_len);
1193}
1194
1195/* obj is assumed to point to allocated memory of size at least len: */
1196int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
1197{
1198 struct xdr_buf subbuf;
1199 int status;
1200
1201 status = xdr_buf_subsegment(buf, &subbuf, base, len);
1202 if (status != 0)
1203 return status;
1204 __write_bytes_to_xdr_buf(&subbuf, obj, len);
1205 return 0;
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001206}
Kevin Coffmanc43abae2010-03-17 13:02:58 -04001207EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001208
1209int
Trond Myklebust1e789572006-08-31 15:09:19 -04001210xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211{
Alexey Dobriyand8ed0292006-09-26 22:29:38 -07001212 __be32 raw;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213 int status;
1214
1215 status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
1216 if (status)
1217 return status;
Benny Halevy98866b52009-08-14 17:18:49 +03001218 *obj = be32_to_cpu(raw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219 return 0;
1220}
Trond Myklebust468039e2008-12-23 15:21:31 -05001221EXPORT_SYMBOL_GPL(xdr_decode_word);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001223int
Trond Myklebust1e789572006-08-31 15:09:19 -04001224xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001225{
Benny Halevy9f162d22009-08-14 17:18:44 +03001226 __be32 raw = cpu_to_be32(obj);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001227
1228 return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
1229}
Trond Myklebust468039e2008-12-23 15:21:31 -05001230EXPORT_SYMBOL_GPL(xdr_encode_word);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001231
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	/* First word at @offset is the object's length */
	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	/* Place the copy after the tail's current contents, or after
	 * the head if the tail is empty */
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001269
/* Returns 0 on success, or else a negative error code.
 *
 * Walks an XDR-encoded fixed-size-element array that may straddle the
 * buffer's head, pages and tail, calling desc->xcode() once per element.
 * @encode selects direction: nonzero writes elements into the buffer,
 * zero reads them out.  Elements that straddle a segment (or page)
 * boundary are staged in a temporary @elem allocation so xcode() always
 * sees a contiguous element; @copied tracks how many bytes of the
 * partial element are currently staged.
 */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		/* Array is prefixed by its element count */
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		/* Validate count against caller's max and buffer length */
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;

	/* No per-element callback: length (de)coded above is all we do */
	if (!desc->xcode)
		return 0;

	/* Total payload bytes still to process */
	todo = desc->array_len * desc->elem_size;

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		/* Whole elements that fit in the head */
		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		/* Partial element at the end of the head: stage it */
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_SHIFT);
		base &= ~PAGE_MASK;
		/* Bytes usable in the current page */
		avail_page = min_t(unsigned int, PAGE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			/* Finish an element left partial by the previous
			 * segment/page, or start one too big for this page */
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			/* Whole elements contiguous within this page */
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			/* Partial element at the end of this page */
			if (avail_page) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			/* More page data: map the next page */
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		/* First complete any element left partial by the pages */
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	/* ppages still points at the last page mapped in the loop */
	if (ppages)
		kunmap(*ppages);
	return err;
}
1464
1465int
1466xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1467 struct xdr_array2_desc *desc)
1468{
1469 if (base >= buf->len)
1470 return -EINVAL;
1471
1472 return xdr_xcode_array2(buf, base, desc, 0);
1473}
Trond Myklebust468039e2008-12-23 15:21:31 -05001474EXPORT_SYMBOL_GPL(xdr_decode_array2);
Andreas Gruenbacherbd8100e2005-06-22 17:16:24 +00001475
1476int
1477xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1478 struct xdr_array2_desc *desc)
1479{
1480 if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1481 buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1482 return -EINVAL;
1483
1484 return xdr_xcode_array2(buf, base, desc, 1);
1485}
Trond Myklebust468039e2008-12-23 15:21:31 -05001486EXPORT_SYMBOL_GPL(xdr_encode_array2);
Olga Kornievskaia37a4e6c2006-12-04 20:22:33 -05001487
/*
 * xdr_process_buf - apply @actor to each physically-contiguous piece of
 * the byte range [@offset, @offset + @len) of @buf
 * @buf: xdr_buf to walk (head, pages, tail)
 * @offset: starting byte offset within @buf
 * @len: number of bytes to process
 * @actor: callback invoked with a one-entry scatterlist per piece
 * @data: opaque cookie passed through to @actor
 *
 * Returns 0 on success, the actor's error, or -EINVAL if the range
 * extends past the end of @buf.
 * NOTE(review): if @actor fails on the tail segment and bytes remain,
 * the actor's error is overwritten by -EINVAL below — confirm callers
 * don't depend on the distinction.
 */
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned int page_len, thislen, page_offset;
	struct scatterlist sg[1];

	sg_init_table(sg, 1);

	/* head */
	if (offset >= buf->head[0].iov_len) {
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	/* page data: one actor call per page-contiguous piece */
	if (offset >= buf->page_len) {
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_SHIFT;
		thislen = PAGE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	/* tail */
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		len -= thislen;
	}
	/* Bytes left over: the requested range was larger than @buf */
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
Olga Kornievskaia37a4e6c2006-12-04 20:22:33 -05001554
Trond Myklebust5c741d42017-02-19 16:08:31 -05001555/**
Trond Myklebust0e779aa2018-03-20 17:03:05 -04001556 * xdr_stream_decode_opaque - Decode variable length opaque
1557 * @xdr: pointer to xdr_stream
1558 * @ptr: location to store opaque data
1559 * @size: size of storage buffer @ptr
1560 *
1561 * Return values:
1562 * On success, returns size of object stored in *@ptr
1563 * %-EBADMSG on XDR buffer overflow
1564 * %-EMSGSIZE on overflow of storage buffer @ptr
1565 */
1566ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, size_t size)
1567{
1568 ssize_t ret;
1569 void *p;
1570
1571 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1572 if (ret <= 0)
1573 return ret;
1574 memcpy(ptr, p, ret);
1575 return ret;
1576}
1577EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque);
1578
1579/**
1580 * xdr_stream_decode_opaque_dup - Decode and duplicate variable length opaque
1581 * @xdr: pointer to xdr_stream
1582 * @ptr: location to store pointer to opaque data
1583 * @maxlen: maximum acceptable object size
1584 * @gfp_flags: GFP mask to use
1585 *
1586 * Return values:
1587 * On success, returns size of object stored in *@ptr
1588 * %-EBADMSG on XDR buffer overflow
1589 * %-EMSGSIZE if the size of the object would exceed @maxlen
1590 * %-ENOMEM on memory allocation failure
1591 */
1592ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr,
1593 size_t maxlen, gfp_t gfp_flags)
1594{
1595 ssize_t ret;
1596 void *p;
1597
1598 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1599 if (ret > 0) {
1600 *ptr = kmemdup(p, ret, gfp_flags);
1601 if (*ptr != NULL)
1602 return ret;
1603 ret = -ENOMEM;
1604 }
1605 *ptr = NULL;
1606 return ret;
1607}
1608EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_dup);
1609
1610/**
1611 * xdr_stream_decode_string - Decode variable length string
1612 * @xdr: pointer to xdr_stream
1613 * @str: location to store string
1614 * @size: size of storage buffer @str
1615 *
1616 * Return values:
1617 * On success, returns length of NUL-terminated string stored in *@str
1618 * %-EBADMSG on XDR buffer overflow
1619 * %-EMSGSIZE on overflow of storage buffer @str
1620 */
1621ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, size_t size)
1622{
1623 ssize_t ret;
1624 void *p;
1625
1626 ret = xdr_stream_decode_opaque_inline(xdr, &p, size);
1627 if (ret > 0) {
1628 memcpy(str, p, ret);
1629 str[ret] = '\0';
1630 return strlen(str);
1631 }
1632 *str = '\0';
1633 return ret;
1634}
1635EXPORT_SYMBOL_GPL(xdr_stream_decode_string);
1636
1637/**
Trond Myklebust5c741d42017-02-19 16:08:31 -05001638 * xdr_stream_decode_string_dup - Decode and duplicate variable length string
1639 * @xdr: pointer to xdr_stream
1640 * @str: location to store pointer to string
1641 * @maxlen: maximum acceptable string length
1642 * @gfp_flags: GFP mask to use
1643 *
1644 * Return values:
1645 * On success, returns length of NUL-terminated string stored in *@ptr
1646 * %-EBADMSG on XDR buffer overflow
1647 * %-EMSGSIZE if the size of the string would exceed @maxlen
1648 * %-ENOMEM on memory allocation failure
1649 */
1650ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
1651 size_t maxlen, gfp_t gfp_flags)
1652{
1653 void *p;
1654 ssize_t ret;
1655
1656 ret = xdr_stream_decode_opaque_inline(xdr, &p, maxlen);
1657 if (ret > 0) {
1658 char *s = kmalloc(ret + 1, gfp_flags);
1659 if (s != NULL) {
1660 memcpy(s, p, ret);
1661 s[ret] = '\0';
1662 *str = s;
1663 return strlen(s);
1664 }
1665 ret = -ENOMEM;
1666 }
1667 *str = NULL;
1668 return ret;
1669}
1670EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);