// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/seq_file.h>
#include <net/netlink.h>

#include "internal.h"

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,	/* walk physical pages (async walk) */
	SKCIPHER_WALK_SLOW = 1 << 1,	/* chunk bounced via a temporary buffer */
	SKCIPHER_WALK_COPY = 1 << 2,	/* data staged through walk->page */
	SKCIPHER_WALK_DIFF = 1 << 3,	/* src and dst mapped separately */
	SKCIPHER_WALK_SLEEP = 1 << 4,	/* sleeping allocations permitted */
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

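/*
 * Worked example for skcipher_get_spot(), with illustrative numbers only:
 * assuming PAGE_SIZE is 4096, a start at page offset 4090 with len 16 would
 * put the last byte in the following page, so end_page (the page containing
 * that last byte) is greater than start and is returned; a buffer that fits
 * entirely within one page returns start unchanged.
 */
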
static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes. Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);
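	/*
	 * Illustrative numbers for the two adjustments above: with
	 * bsize = 48, alignmask = 15 and a = 7, they add 15 & ~7 = 8
	 * bytes of alignment slack and 47 & ~15 = 32 bytes of
	 * page-straddle slack on top of bsize.
	 */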

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

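/*
 * The typical consumer-side loop, sketched here for reference (not part of
 * the original file; "process_chunk" is a hypothetical per-chunk cipher
 * routine): the walker hands out virtually mapped chunks, and each
 * skcipher_walk_done() call reports how many bytes were left unprocessed.
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while (walk.nbytes) {
 *		unsigned int n = walk.nbytes;
 *
 *		process_chunk(walk.dst.virt.addr, walk.src.virt.addr, n,
 *			      walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */
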
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->encrypt(req);
	crypto_stats_skcipher_encrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_alg *alg = tfm->base.__crt_alg;
	unsigned int cryptlen = req->cryptlen;
	int ret;

	crypto_stats_get(alg);
	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = crypto_skcipher_alg(tfm)->decrypt(req);
	crypto_stats_skcipher_decrypt(cryptlen, ret, alg);
	return ret;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

#ifdef CONFIG_NET
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;
	struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg,
						     base);

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}
#else
static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
	.report = crypto_skcipher_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

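/*
 * A minimal allocation/usage sketch (not part of the original file; "key",
 * "sg_src", "sg_dst" and "iv" are hypothetical, and synchronous completion
 * is assumed for brevity):
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, sg_src, sg_dst, len, iv);
 *	err = crypto_skcipher_encrypt(req);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */
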
struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

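/*
 * A sync tfm pairs with an on-stack request; a sketch, not part of the
 * original file ("key", "src", "dst" and "iv" are hypothetical):
 *
 *	struct crypto_sync_skcipher *tfm;
 *
 *	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_sync_skcipher_setkey(tfm, key, keylen);
 *	{
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, src, dst, len, iv);
 *		err = crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 */
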
int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

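/*
 * A driver typically registers its algorithm array from module init and
 * unwinds on exit; a sketch, not part of the original file ("my_algs" is
 * hypothetical):
 *
 *	static struct skcipher_alg my_algs[] = { ... };
 *
 *	static int __init my_init(void)
 *	{
 *		return crypto_register_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		crypto_unregister_skciphers(my_algs, ARRAY_SIZE(my_algs));
 *	}
 */
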
int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb. The instance context will have just a single crypto_spawn,
 * that for the underlying cipher. The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed. The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR(). The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

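/*
 * How a simple mode template might use the helper above, sketched after the
 * in-tree ecb template ("crypto_my_create", "my_encrypt" and "my_decrypt"
 * are hypothetical):
 *
 *	static int crypto_my_create(struct crypto_template *tmpl,
 *				    struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.encrypt = my_encrypt;
 *		inst->alg.decrypt = my_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */
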
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);