// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018-2019 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
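
/*
 * Request flow in this file: caam_rsa_enc() and caam_rsa_dec() build an
 * extended descriptor (struct rsa_edesc) that bundles the hardware job
 * descriptor, the protocol data block (PDB) carrying the key and data
 * pointers, and an optional SEC4 scatter/gather table.
 * akcipher_enqueue_req() then submits the job either directly to a CAAM job
 * ring or, for backloggable requests, through the crypto engine, and the
 * completion callbacks rsa_pub_done()/rsa_priv_f_done() unmap all DMA
 * resources and complete the request.
 */
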
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F1_PDB)
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F2_PDB)
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 SIZEOF_RSA_PRIV_F3_PDB)
#define CAAM_RSA_MAX_INPUT_SIZE	512 /* for a 4096-bit modulus */

/* buffer filled with zeros, used for padding */
static u8 *zero_buffer;

/*
 * variable used to avoid double free of resources in case
 * algorithm registration was unsuccessful
 */
static bool init_done;

struct caam_akcipher_alg {
	struct akcipher_alg akcipher;
	bool registered;
};

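/*
 * The rsa_*_unmap() helpers release the DMA mappings created by
 * rsa_edesc_alloc() and the corresponding set_rsa_*_pdb() routine; they are
 * called from the job completion callbacks and from the error paths of the
 * enqueue functions.
 */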
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
{
	struct akcipher_request *req = context;
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	int ecode = 0;
	bool has_bklog;

	if (err)
		ecode = caam_jr_strstatus(dev, err);

	edesc = req_ctx->edesc;
	has_bklog = edesc->bklog;

	switch (key->priv_form) {
	case FORM1:
		rsa_priv_f1_unmap(dev, edesc, req);
		break;
	case FORM2:
		rsa_priv_f2_unmap(dev, edesc, req);
		break;
	case FORM3:
		rsa_priv_f3_unmap(dev, edesc, req);
	}

	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	/*
	 * If no backlog flag, the completion of the request is done
	 * by CAAM, not crypto engine.
	 */
	if (!has_bklog)
		akcipher_request_complete(req, ecode);
	else
		crypto_finalize_akcipher_request(jrp->engine, req, ecode);
}

/**
 * caam_rsa_count_leading_zeros - Count the leading zero bytes that need to
 *                                be stripped from a given scatterlist
 *
 * @sgl   : scatterlist to count zeros from
 * @nbytes: maximum number of leading zero bytes to strip
 * @flags : operation flags
 */
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		/* do not strip more than given bytes */
		while (len && !*buff && lzeros < nbytes) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

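/*
 * rsa_edesc_alloc() normalizes the input to exactly key->n_sz bytes: when
 * req->src_len exceeds the modulus size, the surplus leading zero bytes
 * counted by caam_rsa_count_leading_zeros() are skipped with
 * scatterwalk_ffwd(); when it is shorter, the difference is supplied from
 * the DMA-mapped zero_buffer as the first entry of the input S/G table.
 * For example (illustrative numbers only), a 253-byte input used with a
 * 2048-bit key (n_sz = 256) gets a 3-byte zero pad prepended.
 */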
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int mapped_src_nents, mapped_dst_nents;
	unsigned int diff_size = 0;
	int lzeros;

	if (req->src_len > key->n_sz) {
		/*
		 * strip leading zeros and
		 * return the number of zeros to skip
		 */
		lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len -
						      key->n_sz, sg_flags);
		if (lzeros < 0)
			return ERR_PTR(lzeros);

		req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src,
						      lzeros);
		req_ctx->fixup_src_len = req->src_len - lzeros;
	} else {
		/*
		 * input src is shorter than the key modulus n,
		 * so there will be zero padding
		 */
		diff_size = key->n_sz - req->src_len;
		req_ctx->fixup_src = req->src;
		req_ctx->fixup_src_len = req->src_len;
	}

	src_nents = sg_nents_for_len(req_ctx->fixup_src,
				     req_ctx->fixup_src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents,
				      DMA_TO_DEVICE);
	if (unlikely(!mapped_src_nents)) {
		dev_err(dev, "unable to map source\n");
		return ERR_PTR(-ENOMEM);
	}
	mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
				      DMA_FROM_DEVICE);
	if (unlikely(!mapped_dst_nents)) {
		dev_err(dev, "unable to map destination\n");
		goto src_fail;
	}

	if (!diff_size && mapped_src_nents == 1)
		sec4_sg_len = 0; /* no need for an input hw s/g table */
	else
		sec4_sg_len = mapped_src_nents + !!diff_size;
	sec4_sg_index = sec4_sg_len;

	if (mapped_dst_nents > 1)
		sec4_sg_len += pad_sg_nents(mapped_dst_nents);
	else
		sec4_sg_len = pad_sg_nents(sec4_sg_len);

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		goto dst_fail;

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;
	if (diff_size)
		dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size,
				   0);

	if (sec4_sg_index)
		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
				   edesc->sec4_sg + !!diff_size, 0);

	if (mapped_dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, req->dst_len,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	req_ctx->edesc = edesc;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->mapped_src_nents = mapped_src_nents;
	edesc->mapped_dst_nents = mapped_dst_nents;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
			     edesc->sec4_sg_bytes, 1);

	return edesc;

sec4_sg_fail:
	kfree(edesc);
dst_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
src_fail:
	dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE);
	return ERR_PTR(-ENOMEM);
}

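/*
 * crypto-engine worker callback: invoked when a previously backlogged
 * request is dequeued. Marking the edesc with bklog tells the job-ring
 * completion callback to finalize the request through the crypto engine
 * instead of completing it directly.
 */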
static int akcipher_do_one_req(struct crypto_engine *engine, void *areq)
{
	struct akcipher_request *req = container_of(areq,
						    struct akcipher_request,
						    base);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	u32 *desc = req_ctx->edesc->hw_desc;
	int ret;

	req_ctx->edesc->bklog = true;

	ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req);

	if (ret == -ENOSPC && engine->retry_support)
		return ret;

	if (ret != -EINPROGRESS) {
		rsa_pub_unmap(jrdev, req_ctx->edesc, req);
		rsa_io_unmap(jrdev, req_ctx->edesc, req);
		kfree(req_ctx->edesc);
	} else {
		ret = 0;
	}

	return ret;
}

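/*
 * The set_rsa_*_pdb() helpers below fill the protocol data block referenced
 * by the job descriptor: they DMA-map the key components and, via the
 * RSA_PDB_SGF_* / RSA_PRIV_PDB_SGF_* flags, record whether the input and
 * output pointers refer to contiguous buffers or to entries in the SEC4
 * scatter/gather table, with the operand sizes packed into the sgf field.
 */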
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req_ctx->fixup_src_len;

	return 0;
}

static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->mapped_src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->mapped_src_nents;
	} else {
		struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);

		pdb->g_dma = sg_dma_address(req_ctx->fixup_src);
	}

	if (edesc->mapped_dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

static int akcipher_enqueue_req(struct device *jrdev,
				void (*cbk)(struct device *jrdev, u32 *desc,
					    u32 err, void *context),
				struct akcipher_request *req)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc = req_ctx->edesc;
	u32 *desc = edesc->hw_desc;
	int ret;

	req_ctx->akcipher_op_done = cbk;
	/*
	 * Only backlog requests are sent to the crypto engine, since the
	 * others can be handled by CAAM directly, if free, especially since
	 * the JR has up to 1024 entries (more than the 10 entries of the
	 * crypto engine).
	 */
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine,
								 req);
	else
		ret = caam_jr_enqueue(jrdev, desc, cbk, req);

	if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
		switch (key->priv_form) {
		case FORM1:
			rsa_priv_f1_unmap(jrdev, edesc, req);
			break;
		case FORM2:
			rsa_priv_f2_unmap(jrdev, edesc, req);
			break;
		case FORM3:
			rsa_priv_f3_unmap(jrdev, edesc, req);
			break;
		default:
			rsa_pub_unmap(jrdev, edesc, req);
		}
		rsa_io_unmap(jrdev, edesc, req);
		kfree(edesc);
	}

	return ret;
}

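/*
 * For orientation only: a typical in-kernel caller (hypothetical, not part
 * of this driver) would reach caam_rsa_enc()/caam_rsa_dec() through the
 * generic akcipher API, roughly:
 *
 *	tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	crypto_akcipher_set_pub_key(tfm, der_key, der_key_len);
 *	req = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	akcipher_request_set_crypt(req, src_sg, dst_sg, src_len, dst_len);
 *	ret = crypto_akcipher_encrypt(req);
 *
 * with a completion callback set via akcipher_request_set_callback() for
 * the asynchronous (-EINPROGRESS/-EBUSY) cases; error handling is omitted.
 */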
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	return akcipher_enqueue_req(jrdev, rsa_pub_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kfree_sensitive(key->d);
	kfree_sensitive(key->p);
	kfree_sensitive(key->q);
	kfree_sensitive(key->dp);
	kfree_sensitive(key->dq);
	kfree_sensitive(key->qinv);
	kfree_sensitive(key->tmp1);
	kfree_sensitive(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length, as
 * the BER-encoding requires that the minimum number of bytes be used to
 * encode the integer. dP, dQ, qInv decoded values have to be zero-padded to
 * the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

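/*
 * rsa_parse_pub_key()/rsa_parse_priv_key() leave the rsa_key members
 * pointing into the caller-supplied BER buffer, so the setkey handlers
 * below copy every component they need into driver-owned GFP_DMA memory.
 * The 4096-bit limit enforced by caam_rsa_check_key_length() corresponds
 * to CAAM_RSA_MAX_INPUT_SIZE (512 bytes).
 */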
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kfree_sensitive(rsa_key->dq);
free_dp:
	kfree_sensitive(rsa_key->dp);
free_tmp2:
	kfree_sensitive(rsa_key->tmp2);
free_tmp1:
	kfree_sensitive(rsa_key->tmp1);
free_q:
	kfree_sensitive(rsa_key->q);
free_p:
	kfree_sensitive(rsa_key->p);
}

static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);

	akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx));

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer,
					  CAAM_RSA_MAX_INPUT_SIZE - 1,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->padding_dma)) {
		dev_err(ctx->dev, "unable to map padding\n");
		caam_jr_free(ctx->dev);
		return -ENOMEM;
	}

	ctx->enginectx.op.do_one_request = akcipher_do_one_req;

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm);
	struct caam_rsa_key *key = &ctx->key;

	dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE -
			 1, DMA_TO_DEVICE);
	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

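/*
 * "rsa-caam" registers under the generic "rsa" algorithm name with
 * cra_priority 3000, so a crypto_alloc_akcipher("rsa", 0, 0) call will
 * normally resolve to this hardware implementation (assuming no other
 * provider advertises a higher priority).
 */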
static struct caam_akcipher_alg caam_rsa = {
	.akcipher = {
		.encrypt = caam_rsa_enc,
		.decrypt = caam_rsa_dec,
		.set_pub_key = caam_rsa_set_pub_key,
		.set_priv_key = caam_rsa_set_priv_key,
		.max_size = caam_rsa_max_size,
		.init = caam_rsa_init_tfm,
		.exit = caam_rsa_exit_tfm,
		.base = {
			.cra_name = "rsa",
			.cra_driver_name = "rsa-caam",
			.cra_priority = 3000,
			.cra_module = THIS_MODULE,
			.cra_ctxsize = sizeof(struct caam_rsa_ctx) +
				       CRYPTO_DMA_PADDING,
		},
	}
};

/* Public Key Cryptography module initialization handler */
int caam_pkc_init(struct device *ctrldev)
{
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	u32 pk_inst, pkha;
	int err;
	init_done = false;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10) {
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	} else {
		pkha = rd_reg32(&priv->ctrl->vreg.pkha);
		pk_inst = pkha & CHA_VER_NUM_MASK;

		/*
		 * Newer CAAMs support partially disabled functionality. If this
		 * is the case, the number is non-zero, but this bit is set to
		 * indicate that no encryption or decryption is supported. Only
		 * signing and verifying are supported.
		 */
		if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
			pk_inst = 0;
	}

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return 0;

	/* allocate zero buffer, used for padding input */
	zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
			      GFP_KERNEL);
	if (!zero_buffer)
		return -ENOMEM;

	err = crypto_register_akcipher(&caam_rsa.akcipher);

	if (err) {
		kfree(zero_buffer);
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.akcipher.base.cra_driver_name);
	} else {
		init_done = true;
		caam_rsa.registered = true;
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");
	}

	return err;
}

void caam_pkc_exit(void)
{
	if (!init_done)
		return;

	if (caam_rsa.registered)
		crypto_unregister_akcipher(&caam_rsa.akcipher);

	kfree(zero_buffer);
}