blob: 4c799df3e8838bd585695a0a1ed967d43bf735d9 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */
13
Javier Martin5de88752013-03-01 12:37:53 +010014#include <crypto/aes.h>
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +010015#include <crypto/internal/hash.h>
Herbert Xu678adec2016-06-29 18:04:05 +080016#include <crypto/internal/skcipher.h>
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +010017#include <crypto/scatterwalk.h>
Eric Biggersa24d22b2020-11-12 21:20:21 -080018#include <crypto/sha1.h>
19#include <crypto/sha2.h>
Javier Martin5de88752013-03-01 12:37:53 +010020
21#include <linux/clk.h>
Herbert Xu0c3dc782020-08-19 21:58:20 +100022#include <linux/dma-mapping.h>
Javier Martin5de88752013-03-01 12:37:53 +010023#include <linux/interrupt.h>
24#include <linux/io.h>
25#include <linux/irq.h>
26#include <linux/kernel.h>
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +010027#include <linux/kthread.h>
Javier Martin5de88752013-03-01 12:37:53 +010028#include <linux/module.h>
29#include <linux/of.h>
Steffen Trumtrar5ed903b2014-12-01 13:26:32 +010030#include <linux/of_device.h>
Javier Martin5de88752013-03-01 12:37:53 +010031#include <linux/platform_device.h>
Zhengchao Shao108586e2022-07-25 12:09:28 +080032#include <linux/spinlock.h>
Javier Martin5de88752013-03-01 12:37:53 +010033
/* SHA buffering: one page of staging data per request context. */
#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000	/* DMA completion timeout */
#define SAHARA_MAX_HW_DESC	2	/* key/IV descriptor + data descriptor */
#define SAHARA_MAX_HW_LINK	20	/* max scatter/gather links (in + out) */

/* Driver flag bits kept in sahara_ctx/sahara_dev ->flags. */
#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)
Javier Martin5de88752013-03-01 12:37:53 +010048
/* Descriptor header bits for the SKHA (AES) channel. */
#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES	0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
/* Headers are toggled to keep odd parity; see sahara_aes_key_hdr(). */
#define SAHARA_HDR_PARITY_BIT		(1 << 31)
60
/* Descriptor header values/bits for the MDHA (hash) channel. */
#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)
77
/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

/* Register offsets and field accessors. */
#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04	/* descriptor address: writing starts the engine */
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET		(1 << 0)
#define SAHARA_CMD_CLEAR_INT		(1 << 8)
#define SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define SAHARA_CMD_MODE_BATCH		(1 << 16)
#define SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE	0
#define SAHARA_STATE_BUSY	1
#define SAHARA_STATE_ERR	2
#define SAHARA_STATE_FAULT	3
#define SAHARA_STATE_COMPLETE	4
#define SAHARA_STATE_COMP_FLAG	(1 << 2)
#define SAHARA_STATUS_DAR_FULL	(1 << 3)
#define SAHARA_STATUS_ERROR	(1 << 4)
#define SAHARA_STATUS_SECURE	(1 << 5)
#define SAHARA_STATUS_FAIL	(1 << 6)
#define SAHARA_STATUS_INIT	(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR	0x1C	/* current descriptor address */
#define SAHARA_REG_IDAR	0x20	/* initial descriptor address */
128
/*
 * Hardware descriptor consumed by the SAHARA engine.  For AES, p1/p2 point
 * either at the IV/key buffers (key descriptor) or at the first input/output
 * link chains (data descriptor); see sahara_hw_descriptor_create().
 */
struct sahara_hw_desc {
	u32	hdr;	/* operation header, SAHARA_HDR_* bits */
	u32	len1;	/* length of first buffer/transfer */
	u32	p1;	/* DMA address of first buffer or link chain */
	u32	len2;	/* length of second buffer/transfer */
	u32	p2;	/* DMA address of second buffer or link chain */
	u32	next;	/* DMA address of next descriptor, 0 terminates */
};
137
/* One scatter/gather entry in a hardware link chain. */
struct sahara_hw_link {
	u32	len;	/* buffer length in bytes */
	u32	p;	/* DMA address of the buffer */
	u32	next;	/* DMA address of next link, 0 terminates */
};
143
/* Per-transform (tfm) context. */
struct sahara_ctx {
	unsigned long flags;	/* FLAGS_NEW_KEY etc. */

	/* AES-specific context */
	int keylen;		/* current key length in bytes */
	u8 key[AES_KEYSIZE_128];	/* hardware handles 128-bit keys only */
	struct crypto_skcipher *fallback;	/* software cipher for 192/256-bit keys */
};
152
/* Per-request AES context. */
struct sahara_aes_reqctx {
	unsigned long mode;	/* FLAGS_ENCRYPT/FLAGS_CBC for this request */
	struct skcipher_request fallback_req;	// keep at the end
};
157
/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};
192
/* Per-device state; the driver supports a single SAHARA instance (dev_ptr). */
struct sahara_dev {
	struct device		*device;
	unsigned int		version;	/* SAHARA_VERSION_3 or _4 */
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	spinlock_t		queue_spinlock;	/* protects ->queue */
	struct task_struct	*kthread;	/* request-dispatch thread */
	struct completion	dma_completion;	/* signalled when the engine finishes */

	struct sahara_ctx	*ctx;		/* context of the in-flight request */
	struct crypto_queue	queue;
	unsigned long		flags;		/* FLAGS_* of the in-flight request */

	/* Coherent DMA buffers for descriptors, key, IV and hash context. */
	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	/* Scatterlists of the in-flight request. */
	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;	/* last ERRSTATUS value, set by the IRQ handler */
};
230
/* The one and only device instance; SAHARA processes one request at a time. */
static struct sahara_dev *dev_ptr;
232
/* Write a 32-bit value to a SAHARA register (offset in bytes). */
static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}
237
/* Read a 32-bit SAHARA register (offset in bytes). */
static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}
242
243static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
244{
245 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
246 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
247 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
248
249 if (dev->flags & FLAGS_CBC) {
250 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
251 hdr ^= SAHARA_HDR_PARITY_BIT;
252 }
253
254 if (dev->flags & FLAGS_ENCRYPT) {
255 hdr |= SAHARA_HDR_SKHA_OP_ENC;
256 hdr ^= SAHARA_HDR_PARITY_BIT;
257 }
258
259 return hdr;
260}
261
/* Header for the AES data descriptor (p1/p2 point at link chains). */
static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
	       SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}
267
/* Error-source strings, indexed by SAHARA_ERRSTATUS_GET_SOURCE() (0-15). */
static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};
286
/* DMA transfer-size strings, indexed by SAHARA_ERRSTATUS_GET_DMASZ() (0-3). */
static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};
293
/* DMA error-source strings, indexed by SAHARA_ERRSTATUS_GET_DMASRC() (0-7). */
static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};
304
/*
 * CHA error-source strings; indexed via ffs() of the CHASRC bit field in
 * sahara_decode_error(), so index = (bit position + 1), 0 when no bit set.
 */
static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};
319
/* CHA unit names, indexed by SAHARA_ERRSTATUS_GET_CHAERR() (0-3). */
static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
Javier Martin5de88752013-03-01 12:37:53 +0100321
322static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
323{
324 u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
325 u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
326
327 dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
328
329 dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
330
331 if (source == SAHARA_ERRSOURCE_DMA) {
332 if (error & SAHARA_ERRSTATUS_DMA_DIR)
333 dev_err(dev->device, " * DMA read.\n");
334 else
335 dev_err(dev->device, " * DMA write.\n");
336
337 dev_err(dev->device, " * %s.\n",
338 sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
339 dev_err(dev->device, " * %s.\n",
340 sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
341 } else if (source == SAHARA_ERRSOURCE_CHA) {
342 dev_err(dev->device, " * %s.\n",
343 sahara_cha_errsrc[chasrc]);
344 dev_err(dev->device, " * %s.\n",
345 sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
346 }
347 dev_err(dev->device, "\n");
348}
349
/* Engine states, indexed by SAHARA_STATUS_GET_STATE() minus the COMP flag. */
static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
Javier Martin5de88752013-03-01 12:37:53 +0100351
/*
 * Dump a decoded view of the STATUS register via dev_dbg().
 * Compiled out (early return) unless DEBUG is defined.
 */
static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!__is_defined(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	/* Mask off the completion flag to index the 4-entry state table. */
	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}
403
/* Dump all hardware descriptors via dev_dbg(); no-op unless DEBUG is defined. */
static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
424
/* Dump all hardware link entries via dev_dbg(); no-op unless DEBUG is defined. */
static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!__is_defined(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}
442
Javier Martin5de88752013-03-01 12:37:53 +0100443static int sahara_hw_descriptor_create(struct sahara_dev *dev)
444{
445 struct sahara_ctx *ctx = dev->ctx;
446 struct scatterlist *sg;
447 int ret;
448 int i, j;
Steffen Trumtrar17110452015-04-07 17:13:42 +0200449 int idx = 0;
Javier Martin5de88752013-03-01 12:37:53 +0100450
451 /* Copy new key if necessary */
452 if (ctx->flags & FLAGS_NEW_KEY) {
453 memcpy(dev->key_base, ctx->key, ctx->keylen);
454 ctx->flags &= ~FLAGS_NEW_KEY;
455
456 if (dev->flags & FLAGS_CBC) {
Steffen Trumtrar17110452015-04-07 17:13:42 +0200457 dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
458 dev->hw_desc[idx]->p1 = dev->iv_phys_base;
Javier Martin5de88752013-03-01 12:37:53 +0100459 } else {
Steffen Trumtrar17110452015-04-07 17:13:42 +0200460 dev->hw_desc[idx]->len1 = 0;
461 dev->hw_desc[idx]->p1 = 0;
Javier Martin5de88752013-03-01 12:37:53 +0100462 }
Steffen Trumtrar17110452015-04-07 17:13:42 +0200463 dev->hw_desc[idx]->len2 = ctx->keylen;
464 dev->hw_desc[idx]->p2 = dev->key_phys_base;
465 dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
466
467 dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
468
469 idx++;
Javier Martin5de88752013-03-01 12:37:53 +0100470 }
Javier Martin5de88752013-03-01 12:37:53 +0100471
LABBE Corentind23afa12015-09-18 14:57:11 +0200472 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
LABBE Corentin6c2b74d2015-11-04 21:13:35 +0100473 if (dev->nb_in_sg < 0) {
474 dev_err(dev->device, "Invalid numbers of src SG.\n");
475 return dev->nb_in_sg;
476 }
LABBE Corentind23afa12015-09-18 14:57:11 +0200477 dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
LABBE Corentin6c2b74d2015-11-04 21:13:35 +0100478 if (dev->nb_out_sg < 0) {
479 dev_err(dev->device, "Invalid numbers of dst SG.\n");
480 return dev->nb_out_sg;
481 }
Javier Martin5de88752013-03-01 12:37:53 +0100482 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
483 dev_err(dev->device, "not enough hw links (%d)\n",
484 dev->nb_in_sg + dev->nb_out_sg);
485 return -EINVAL;
486 }
487
488 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
489 DMA_TO_DEVICE);
Jack Wang66f0b6b2022-08-19 08:07:50 +0200490 if (!ret) {
Javier Martin5de88752013-03-01 12:37:53 +0100491 dev_err(dev->device, "couldn't map in sg\n");
492 goto unmap_in;
493 }
494 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
495 DMA_FROM_DEVICE);
Jack Wang66f0b6b2022-08-19 08:07:50 +0200496 if (!ret) {
Javier Martin5de88752013-03-01 12:37:53 +0100497 dev_err(dev->device, "couldn't map out sg\n");
498 goto unmap_out;
499 }
500
501 /* Create input links */
Steffen Trumtrar17110452015-04-07 17:13:42 +0200502 dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
Javier Martin5de88752013-03-01 12:37:53 +0100503 sg = dev->in_sg;
504 for (i = 0; i < dev->nb_in_sg; i++) {
505 dev->hw_link[i]->len = sg->length;
506 dev->hw_link[i]->p = sg->dma_address;
507 if (i == (dev->nb_in_sg - 1)) {
508 dev->hw_link[i]->next = 0;
509 } else {
510 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
511 sg = sg_next(sg);
512 }
513 }
514
515 /* Create output links */
Steffen Trumtrar17110452015-04-07 17:13:42 +0200516 dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
Javier Martin5de88752013-03-01 12:37:53 +0100517 sg = dev->out_sg;
518 for (j = i; j < dev->nb_out_sg + i; j++) {
519 dev->hw_link[j]->len = sg->length;
520 dev->hw_link[j]->p = sg->dma_address;
521 if (j == (dev->nb_out_sg + i - 1)) {
522 dev->hw_link[j]->next = 0;
523 } else {
524 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
525 sg = sg_next(sg);
526 }
527 }
528
529 /* Fill remaining fields of hw_desc[1] */
Steffen Trumtrar17110452015-04-07 17:13:42 +0200530 dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
531 dev->hw_desc[idx]->len1 = dev->total;
532 dev->hw_desc[idx]->len2 = dev->total;
533 dev->hw_desc[idx]->next = 0;
Javier Martin5de88752013-03-01 12:37:53 +0100534
535 sahara_dump_descriptors(dev);
536 sahara_dump_links(dev);
537
Javier Martin5de88752013-03-01 12:37:53 +0100538 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
539
540 return 0;
541
542unmap_out:
543 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
Mogens Lauridsen1e320412017-08-03 15:34:12 +0200544 DMA_FROM_DEVICE);
Javier Martin5de88752013-03-01 12:37:53 +0100545unmap_in:
546 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
Mogens Lauridsen1e320412017-08-03 15:34:12 +0200547 DMA_TO_DEVICE);
Javier Martin5de88752013-03-01 12:37:53 +0100548
549 return -EINVAL;
550}
551
/*
 * Process one AES request synchronously: program the descriptors, wait for
 * the DMA completion raised by the IRQ handler, then unmap the scatterlists.
 * Called from the dispatch kthread, one request at a time.
 *
 * Returns 0 on success, -EINVAL on descriptor setup failure, -ETIMEDOUT if
 * the hardware does not complete within SAHARA_TIMEOUT_MS.
 */
static int sahara_aes_process(struct skcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->cryptlen, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->cryptlen;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = skcipher_request_ctx(req);
	ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	/* AES_KEYSIZE_128 == AES_BLOCK_SIZE (16), the IV length for CBC. */
	if ((dev->flags & FLAGS_CBC) && req->iv)
		memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		/* NOTE(review): scatterlists stay mapped on timeout — leak? */
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}
601
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100602static int sahara_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
Javier Martin5de88752013-03-01 12:37:53 +0100603 unsigned int keylen)
604{
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100605 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
Javier Martin5de88752013-03-01 12:37:53 +0100606
607 ctx->keylen = keylen;
608
609 /* SAHARA only supports 128bit keys */
610 if (keylen == AES_KEYSIZE_128) {
611 memcpy(ctx->key, key, keylen);
612 ctx->flags |= FLAGS_NEW_KEY;
613 return 0;
614 }
615
Herbert Xu678adec2016-06-29 18:04:05 +0800616 if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
Javier Martin5de88752013-03-01 12:37:53 +0100617 return -EINVAL;
618
619 /*
620 * The requested key size is not supported by HW, do a fallback.
621 */
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300622 crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
623 crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
Herbert Xu678adec2016-06-29 18:04:05 +0800624 CRYPTO_TFM_REQ_MASK);
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300625 return crypto_skcipher_setkey(ctx->fallback, key, keylen);
Javier Martin5de88752013-03-01 12:37:53 +0100626}
627
/*
 * Validate and enqueue an AES request; the dispatch kthread picks it up.
 * @mode carries FLAGS_ENCRYPT/FLAGS_CBC.  Returns the crypto_enqueue_request()
 * status (typically -EINPROGRESS) or -EINVAL for non-block-sized requests.
 */
static int sahara_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->cryptlen, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	/* The hardware only handles whole AES blocks. */
	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	spin_lock_bh(&dev->queue_spinlock);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_bh(&dev->queue_spinlock);

	wake_up_process(dev->kthread);

	return err;
}
653
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100654static int sahara_aes_ecb_encrypt(struct skcipher_request *req)
Javier Martin5de88752013-03-01 12:37:53 +0100655{
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300656 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100657 struct sahara_ctx *ctx = crypto_skcipher_ctx(
658 crypto_skcipher_reqtfm(req));
Javier Martin5de88752013-03-01 12:37:53 +0100659
660 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300661 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
662 skcipher_request_set_callback(&rctx->fallback_req,
663 req->base.flags,
664 req->base.complete,
665 req->base.data);
666 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
667 req->dst, req->cryptlen, req->iv);
668 return crypto_skcipher_encrypt(&rctx->fallback_req);
Javier Martin5de88752013-03-01 12:37:53 +0100669 }
670
671 return sahara_aes_crypt(req, FLAGS_ENCRYPT);
672}
673
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100674static int sahara_aes_ecb_decrypt(struct skcipher_request *req)
Javier Martin5de88752013-03-01 12:37:53 +0100675{
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300676 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100677 struct sahara_ctx *ctx = crypto_skcipher_ctx(
678 crypto_skcipher_reqtfm(req));
Javier Martin5de88752013-03-01 12:37:53 +0100679
680 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300681 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
682 skcipher_request_set_callback(&rctx->fallback_req,
683 req->base.flags,
684 req->base.complete,
685 req->base.data);
686 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
687 req->dst, req->cryptlen, req->iv);
688 return crypto_skcipher_decrypt(&rctx->fallback_req);
Javier Martin5de88752013-03-01 12:37:53 +0100689 }
690
691 return sahara_aes_crypt(req, 0);
692}
693
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100694static int sahara_aes_cbc_encrypt(struct skcipher_request *req)
Javier Martin5de88752013-03-01 12:37:53 +0100695{
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300696 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100697 struct sahara_ctx *ctx = crypto_skcipher_ctx(
698 crypto_skcipher_reqtfm(req));
Javier Martin5de88752013-03-01 12:37:53 +0100699
700 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300701 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
702 skcipher_request_set_callback(&rctx->fallback_req,
703 req->base.flags,
704 req->base.complete,
705 req->base.data);
706 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
707 req->dst, req->cryptlen, req->iv);
708 return crypto_skcipher_encrypt(&rctx->fallback_req);
Javier Martin5de88752013-03-01 12:37:53 +0100709 }
710
711 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
712}
713
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100714static int sahara_aes_cbc_decrypt(struct skcipher_request *req)
Javier Martin5de88752013-03-01 12:37:53 +0100715{
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300716 struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100717 struct sahara_ctx *ctx = crypto_skcipher_ctx(
718 crypto_skcipher_reqtfm(req));
Javier Martin5de88752013-03-01 12:37:53 +0100719
720 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300721 skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback);
722 skcipher_request_set_callback(&rctx->fallback_req,
723 req->base.flags,
724 req->base.complete,
725 req->base.data);
726 skcipher_request_set_crypt(&rctx->fallback_req, req->src,
727 req->dst, req->cryptlen, req->iv);
728 return crypto_skcipher_decrypt(&rctx->fallback_req);
Javier Martin5de88752013-03-01 12:37:53 +0100729 }
730
731 return sahara_aes_crypt(req, FLAGS_CBC);
732}
733
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100734static int sahara_aes_init_tfm(struct crypto_skcipher *tfm)
Javier Martin5de88752013-03-01 12:37:53 +0100735{
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100736 const char *name = crypto_tfm_alg_name(&tfm->base);
737 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
Javier Martin5de88752013-03-01 12:37:53 +0100738
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300739 ctx->fallback = crypto_alloc_skcipher(name, 0,
Herbert Xu678adec2016-06-29 18:04:05 +0800740 CRYPTO_ALG_NEED_FALLBACK);
Javier Martin5de88752013-03-01 12:37:53 +0100741 if (IS_ERR(ctx->fallback)) {
742 pr_err("Error allocating fallback algo %s\n", name);
743 return PTR_ERR(ctx->fallback);
744 }
745
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300746 crypto_skcipher_set_reqsize(tfm, sizeof(struct sahara_aes_reqctx) +
747 crypto_skcipher_reqsize(ctx->fallback));
Javier Martin5de88752013-03-01 12:37:53 +0100748
749 return 0;
750}
751
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100752static void sahara_aes_exit_tfm(struct crypto_skcipher *tfm)
Javier Martin5de88752013-03-01 12:37:53 +0100753{
Ard Biesheuvel44c10a832019-11-09 18:09:43 +0100754 struct sahara_ctx *ctx = crypto_skcipher_ctx(tfm);
Javier Martin5de88752013-03-01 12:37:53 +0100755
Ard Biesheuvel56ca4992020-07-07 09:32:02 +0300756 crypto_free_skcipher(ctx->fallback);
Javier Martin5de88752013-03-01 12:37:53 +0100757}
758
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100759static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
760 struct sahara_sha_reqctx *rctx)
761{
762 u32 hdr = 0;
763
764 hdr = rctx->mode;
765
766 if (rctx->first) {
767 hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
768 hdr |= SAHARA_HDR_MDHA_INIT;
769 } else {
770 hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
771 }
772
773 if (rctx->last)
774 hdr |= SAHARA_HDR_MDHA_PDATA;
775
776 if (hweight_long(hdr) % 2 == 0)
777 hdr |= SAHARA_HDR_PARITY_BIT;
778
779 return hdr;
780}
781
782static int sahara_sha_hw_links_create(struct sahara_dev *dev,
783 struct sahara_sha_reqctx *rctx,
784 int start)
785{
786 struct scatterlist *sg;
787 unsigned int i;
788 int ret;
789
790 dev->in_sg = rctx->in_sg;
791
LABBE Corentind23afa12015-09-18 14:57:11 +0200792 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
LABBE Corentin6c2b74d2015-11-04 21:13:35 +0100793 if (dev->nb_in_sg < 0) {
794 dev_err(dev->device, "Invalid numbers of src SG.\n");
795 return dev->nb_in_sg;
796 }
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100797 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
798 dev_err(dev->device, "not enough hw links (%d)\n",
799 dev->nb_in_sg + dev->nb_out_sg);
800 return -EINVAL;
801 }
802
LABBE Corentin640eec52015-09-23 13:55:28 +0200803 sg = dev->in_sg;
804 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
805 if (!ret)
806 return -EFAULT;
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100807
LABBE Corentin640eec52015-09-23 13:55:28 +0200808 for (i = start; i < dev->nb_in_sg + start; i++) {
809 dev->hw_link[i]->len = sg->length;
810 dev->hw_link[i]->p = sg->dma_address;
811 if (i == (dev->nb_in_sg + start - 1)) {
812 dev->hw_link[i]->next = 0;
813 } else {
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100814 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
815 sg = sg_next(sg);
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100816 }
817 }
818
819 return i;
820}
821
822static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
823 struct sahara_sha_reqctx *rctx,
824 struct ahash_request *req,
825 int index)
826{
827 unsigned result_len;
828 int i = index;
829
830 if (rctx->first)
831 /* Create initial descriptor: #8*/
832 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
833 else
834 /* Create hash descriptor: #10. Must follow #6. */
835 dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
836
837 dev->hw_desc[index]->len1 = rctx->total;
838 if (dev->hw_desc[index]->len1 == 0) {
839 /* if len1 is 0, p1 must be 0, too */
840 dev->hw_desc[index]->p1 = 0;
841 rctx->sg_in_idx = 0;
842 } else {
843 /* Create input links */
844 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
845 i = sahara_sha_hw_links_create(dev, rctx, index);
846
847 rctx->sg_in_idx = index;
848 if (i < 0)
849 return i;
850 }
851
852 dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
853
854 /* Save the context for the next operation */
855 result_len = rctx->context_size;
856 dev->hw_link[i]->p = dev->context_phys_base;
857
858 dev->hw_link[i]->len = result_len;
859 dev->hw_desc[index]->len2 = result_len;
860
861 dev->hw_link[i]->next = 0;
862
863 return 0;
864}
865
866/*
867 * Load descriptor aka #6
868 *
869 * To load a previously saved context back to the MDHA unit
870 *
871 * p1: Saved Context
872 * p2: NULL
873 *
874 */
875static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
876 struct sahara_sha_reqctx *rctx,
877 struct ahash_request *req,
878 int index)
879{
880 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
881
882 dev->hw_desc[index]->len1 = rctx->context_size;
883 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
884 dev->hw_desc[index]->len2 = 0;
885 dev->hw_desc[index]->p2 = 0;
886
887 dev->hw_link[index]->len = rctx->context_size;
888 dev->hw_link[index]->p = dev->context_phys_base;
889 dev->hw_link[index]->next = 0;
890
891 return 0;
892}
893
894static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
895{
896 if (!sg || !sg->length)
897 return nbytes;
898
899 while (nbytes && sg) {
900 if (nbytes <= sg->length) {
901 sg->length = nbytes;
902 sg_mark_end(sg);
903 break;
904 }
905 nbytes -= sg->length;
Cristian Stoica5be4d4c2015-01-20 10:06:16 +0200906 sg = sg_next(sg);
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100907 }
908
909 return nbytes;
910}
911
/*
 * Arrange the input for one hardware hash pass.
 *
 * Combines bytes buffered from earlier ->update() calls with the new
 * request data and trims the total to a multiple of the block size
 * (only the final transfer may be padded by the hardware); the excess
 * is stashed in rctx->buf for the next call.
 *
 * Returns 0 when everything fit in the buffer and no hardware operation
 * is needed, or -EINPROGRESS when rctx->in_sg/rctx->total are set up
 * and the caller must run the hardware.
 */
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* to few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	/* NOTE: this shortens req->src's sg entries in place */
	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		/* chain: [rembuf (buffered bytes)] -> [req->src (new data)] */
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
985
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100986static int sahara_sha_process(struct ahash_request *req)
987{
988 struct sahara_dev *dev = dev_ptr;
989 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
Nicholas Mc Guiredf586cb2015-02-07 06:16:46 -0500990 int ret;
Nicholas Mc Guire58ed7982015-02-07 06:17:13 -0500991 unsigned long timeout;
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +0100992
993 ret = sahara_sha_prepare_request(req);
994 if (!ret)
995 return ret;
996
997 if (rctx->first) {
998 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
999 dev->hw_desc[0]->next = 0;
1000 rctx->first = 0;
1001 } else {
1002 memcpy(dev->context_base, rctx->context, rctx->context_size);
1003
1004 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1005 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1006 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1007 dev->hw_desc[1]->next = 0;
1008 }
1009
1010 sahara_dump_descriptors(dev);
1011 sahara_dump_links(dev);
1012
1013 reinit_completion(&dev->dma_completion);
1014
1015 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1016
Nicholas Mc Guire58ed7982015-02-07 06:17:13 -05001017 timeout = wait_for_completion_timeout(&dev->dma_completion,
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001018 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
Nicholas Mc Guire58ed7982015-02-07 06:17:13 -05001019 if (!timeout) {
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001020 dev_err(dev->device, "SHA timeout\n");
1021 return -ETIMEDOUT;
1022 }
1023
1024 if (rctx->sg_in_idx)
LABBE Corentin640eec52015-09-23 13:55:28 +02001025 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1026 DMA_TO_DEVICE);
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001027
1028 memcpy(rctx->context, dev->context_base, rctx->context_size);
1029
1030 if (req->result)
1031 memcpy(req->result, rctx->context, rctx->digest_size);
1032
1033 return 0;
1034}
1035
/*
 * Worker kthread that drains the shared crypto request queue.
 *
 * The task state is set to TASK_INTERRUPTIBLE *before* the queue is
 * checked, which closes the race with the enqueue paths' calls to
 * wake_up_process(): a wakeup that arrives after the check simply makes
 * the later schedule() return immediately instead of being lost.
 */
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_bh(&dev->queue_spinlock);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		spin_unlock_bh(&dev->queue_spinlock);

		/* A backlogged request now has a queue slot: notify its owner. */
		if (backlog)
			crypto_request_complete(backlog, -EINPROGRESS);

		if (async_req) {
			/* only AHASH and skcipher requests are ever enqueued */
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct skcipher_request *req =
					skcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			crypto_request_complete(async_req, ret);

			/* keep draining without sleeping while work remains */
			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
1078
/*
 * Queue a hash request for the worker kthread.  @last marks the final
 * chunk so the hardware applies the SHA padding.  Returns the status
 * from crypto_enqueue_request() (e.g. -EINPROGRESS or -EBUSY).
 */
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	/* an empty, non-final update is a no-op */
	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	/* first enqueue for this request: flag the initial hardware pass */
	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	spin_lock_bh(&dev->queue_spinlock);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	spin_unlock_bh(&dev->queue_spinlock);

	/* kick the kthread that drains the queue */
	wake_up_process(dev->kthread);

	return ret;
}
1103
1104static int sahara_sha_init(struct ahash_request *req)
1105{
1106 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1107 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1108
1109 memset(rctx, 0, sizeof(*rctx));
1110
1111 switch (crypto_ahash_digestsize(tfm)) {
1112 case SHA1_DIGEST_SIZE:
1113 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1114 rctx->digest_size = SHA1_DIGEST_SIZE;
1115 break;
1116 case SHA256_DIGEST_SIZE:
1117 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1118 rctx->digest_size = SHA256_DIGEST_SIZE;
1119 break;
1120 default:
1121 return -EINVAL;
1122 }
1123
1124 rctx->context_size = rctx->digest_size + 4;
1125 rctx->active = 0;
1126
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001127 return 0;
1128}
1129
/* ahash ->update(): queue a non-final chunk of message data. */
static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}
1134
/*
 * ahash ->final(): no new data; flush whatever is buffered and let the
 * hardware apply the final padding (last == 1).
 */
static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}
1140
/* ahash ->finup(): hash the remaining data and finalize in one call. */
static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}
1145
/* ahash ->digest(): init + finup over the whole request in one call. */
static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}
1152
/*
 * ahash ->export(): serialize the entire request context so the hash can
 * be resumed later; halg.statesize matches sizeof(struct sahara_sha_reqctx).
 */
static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}
1161
/* ahash ->import(): restore a context previously saved by ->export(). */
static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}
1170
static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	/*
	 * The request context is followed by the partial-block buffer;
	 * reserve room for both (SHA_BUFFER_LEN plus the largest
	 * supported block size).
	 */
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}
1179
Ard Biesheuvel44c10a832019-11-09 18:09:43 +01001180static struct skcipher_alg aes_algs[] = {
Javier Martin5de88752013-03-01 12:37:53 +01001181{
Ard Biesheuvel44c10a832019-11-09 18:09:43 +01001182 .base.cra_name = "ecb(aes)",
1183 .base.cra_driver_name = "sahara-ecb-aes",
1184 .base.cra_priority = 300,
1185 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1186 .base.cra_blocksize = AES_BLOCK_SIZE,
1187 .base.cra_ctxsize = sizeof(struct sahara_ctx),
1188 .base.cra_alignmask = 0x0,
1189 .base.cra_module = THIS_MODULE,
1190
1191 .init = sahara_aes_init_tfm,
1192 .exit = sahara_aes_exit_tfm,
1193 .min_keysize = AES_MIN_KEY_SIZE ,
1194 .max_keysize = AES_MAX_KEY_SIZE,
1195 .setkey = sahara_aes_setkey,
1196 .encrypt = sahara_aes_ecb_encrypt,
1197 .decrypt = sahara_aes_ecb_decrypt,
Javier Martin5de88752013-03-01 12:37:53 +01001198}, {
Ard Biesheuvel44c10a832019-11-09 18:09:43 +01001199 .base.cra_name = "cbc(aes)",
1200 .base.cra_driver_name = "sahara-cbc-aes",
1201 .base.cra_priority = 300,
1202 .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1203 .base.cra_blocksize = AES_BLOCK_SIZE,
1204 .base.cra_ctxsize = sizeof(struct sahara_ctx),
1205 .base.cra_alignmask = 0x0,
1206 .base.cra_module = THIS_MODULE,
1207
1208 .init = sahara_aes_init_tfm,
1209 .exit = sahara_aes_exit_tfm,
1210 .min_keysize = AES_MIN_KEY_SIZE ,
1211 .max_keysize = AES_MAX_KEY_SIZE,
1212 .ivsize = AES_BLOCK_SIZE,
1213 .setkey = sahara_aes_setkey,
1214 .encrypt = sahara_aes_cbc_encrypt,
1215 .decrypt = sahara_aes_cbc_decrypt,
Javier Martin5de88752013-03-01 12:37:53 +01001216}
1217};
1218
/* Hashes available on every supported SAHARA version (v3+): SHA-1. */
static struct ahash_alg sha_v3_algs[] = {
{
	.init = sahara_sha_init,
	.update = sahara_sha_update,
	.final = sahara_sha_final,
	.finup = sahara_sha_finup,
	.digest = sahara_sha_digest,
	.export = sahara_sha_export,
	.import = sahara_sha_import,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	/* export/import copy the whole reqctx, so statesize must match it */
	.halg.statesize = sizeof(struct sahara_sha_reqctx),
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "sahara-sha1",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct sahara_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = sahara_sha_cra_init,
	}
},
};
1244
/* SHA-256 is only registered on SAHARA v4 (see sahara_register_algs()). */
static struct ahash_alg sha_v4_algs[] = {
{
	.init = sahara_sha_init,
	.update = sahara_sha_update,
	.final = sahara_sha_final,
	.finup = sahara_sha_finup,
	.digest = sahara_sha_digest,
	.export = sahara_sha_export,
	.import = sahara_sha_import,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	/* export/import copy the whole reqctx, so statesize must match it */
	.halg.statesize = sizeof(struct sahara_sha_reqctx),
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "sahara-sha256",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = SHA256_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct sahara_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = sahara_sha_cra_init,
	}
},
};
1270
Javier Martin5de88752013-03-01 12:37:53 +01001271static irqreturn_t sahara_irq_handler(int irq, void *data)
1272{
Yu Zheaedf8182023-03-17 14:26:03 +08001273 struct sahara_dev *dev = data;
Javier Martin5de88752013-03-01 12:37:53 +01001274 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1275 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1276
Javier Martin5de88752013-03-01 12:37:53 +01001277 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1278 SAHARA_REG_CMD);
1279
1280 sahara_decode_status(dev, stat);
1281
1282 if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1283 return IRQ_NONE;
1284 } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1285 dev->error = 0;
1286 } else {
1287 sahara_decode_error(dev, err);
1288 dev->error = -EINVAL;
1289 }
1290
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +01001291 complete(&dev->dma_completion);
Javier Martin5de88752013-03-01 12:37:53 +01001292
1293 return IRQ_HANDLED;
1294}
1295
1296
1297static int sahara_register_algs(struct sahara_dev *dev)
1298{
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001299 int err;
1300 unsigned int i, j, k, l;
Javier Martin5de88752013-03-01 12:37:53 +01001301
1302 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
Ard Biesheuvel44c10a832019-11-09 18:09:43 +01001303 err = crypto_register_skcipher(&aes_algs[i]);
Javier Martin5de88752013-03-01 12:37:53 +01001304 if (err)
1305 goto err_aes_algs;
1306 }
1307
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001308 for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1309 err = crypto_register_ahash(&sha_v3_algs[k]);
1310 if (err)
1311 goto err_sha_v3_algs;
1312 }
1313
1314 if (dev->version > SAHARA_VERSION_3)
1315 for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1316 err = crypto_register_ahash(&sha_v4_algs[l]);
1317 if (err)
1318 goto err_sha_v4_algs;
1319 }
1320
Javier Martin5de88752013-03-01 12:37:53 +01001321 return 0;
1322
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001323err_sha_v4_algs:
1324 for (j = 0; j < l; j++)
1325 crypto_unregister_ahash(&sha_v4_algs[j]);
1326
1327err_sha_v3_algs:
1328 for (j = 0; j < k; j++)
Michael Müller0e7d4d92018-07-15 00:27:06 +02001329 crypto_unregister_ahash(&sha_v3_algs[j]);
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001330
Javier Martin5de88752013-03-01 12:37:53 +01001331err_aes_algs:
1332 for (j = 0; j < i; j++)
Ard Biesheuvel44c10a832019-11-09 18:09:43 +01001333 crypto_unregister_skcipher(&aes_algs[j]);
Javier Martin5de88752013-03-01 12:37:53 +01001334
1335 return err;
1336}
1337
1338static void sahara_unregister_algs(struct sahara_dev *dev)
1339{
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001340 unsigned int i;
Javier Martin5de88752013-03-01 12:37:53 +01001341
1342 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
Ard Biesheuvel44c10a832019-11-09 18:09:43 +01001343 crypto_unregister_skcipher(&aes_algs[i]);
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001344
Michael Müller0e7d4d92018-07-15 00:27:06 +02001345 for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
Steffen Trumtrar5a2bb932014-12-01 13:26:34 +01001346 crypto_unregister_ahash(&sha_v3_algs[i]);
1347
1348 if (dev->version > SAHARA_VERSION_3)
1349 for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1350 crypto_unregister_ahash(&sha_v4_algs[i]);
Javier Martin5de88752013-03-01 12:37:53 +01001351}
1352
/* Devicetree match table: i.MX27 carries SAHARA v3, i.MX53 carries v4. */
static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);
Javier Martin5de88752013-03-01 12:37:53 +01001359
/*
 * Probe: map MMIO, grab IRQ and clocks, allocate the DMA-coherent
 * descriptor/link/key/context buffers, start the queue kthread, check
 * the silicon version against the DT compatible, and register the
 * crypto algorithms.
 *
 * Most resources are devm-/dmam-managed, so early error paths simply
 * return; only the kthread and the clocks need explicit unwinding.
 */
static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	/* both descriptors live back-to-back in a single allocation */
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	/* the IV shares the key allocation, right behind the 128-bit key */
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	/* precompute virtual/physical addresses of every link in the array */
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->queue_spinlock);

	/* publish the device for the request paths before starting the thread */
	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread)) {
		return PTR_ERR(dev->kthread);
	}

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	/*
	 * Validate the silicon version against the DT compatible string.
	 * err is still 0 here, so the -ENODEV comparison below only
	 * triggers when one of the checks fails.
	 */
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	/* reset the unit, then enable batch mode, RNG autoreseed and IRQs */
	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}
1513
1514static int sahara_remove(struct platform_device *pdev)
1515{
1516 struct sahara_dev *dev = platform_get_drvdata(pdev);
1517
Steffen Trumtrarc0c3c892014-12-01 13:26:33 +01001518 kthread_stop(dev->kthread);
Javier Martin5de88752013-03-01 12:37:53 +01001519
1520 sahara_unregister_algs(dev);
1521
1522 clk_disable_unprepare(dev->clk_ipg);
1523 clk_disable_unprepare(dev->clk_ahb);
1524
1525 dev_ptr = NULL;
1526
1527 return 0;
1528}
1529
/* Platform driver glue; binding happens via the DT match table above. */
static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
};

module_platform_driver(sahara_driver);
1540
/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");