blob: 90f15032c8df0c84105a7f5a42b354e7b10e4540 [file] [log] [blame]
Antoine Tenart301422e2018-07-13 16:51:37 +02001// SPDX-License-Identifier: GPL-2.0
Antoine Ténart1b44c5a2017-05-24 16:10:34 +02002/*
3 * Copyright (C) 2017 Marvell
4 *
5 * Antoine Tenart <antoine.tenart@free-electrons.com>
Antoine Ténart1b44c5a2017-05-24 16:10:34 +02006 */
7
8#include <linux/dma-mapping.h>
9#include <linux/spinlock.h>
10
11#include "safexcel.h"
12
/*
 * safexcel_init_ring_descriptors - allocate and initialize one CDR/RDR pair
 * @priv: driver private data (supplies dev, config offsets)
 * @cdr:  command descriptor ring to set up
 * @rdr:  result descriptor ring to set up
 *
 * Allocates DMA-coherent memory (device-managed, so no explicit free path is
 * needed) for the command ring, its shadow token ring and the result ring,
 * and resets all read/write pointers to the ring base. Each command
 * descriptor is pre-pointed at its shadow slot once here, since those
 * physical pointers never change afterwards.
 *
 * Returns 0 on success or -ENOMEM if any allocation fails.
 */
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_desc_ring *cdr,
				   struct safexcel_desc_ring *rdr)
{
	int i;
	struct safexcel_command_desc *cdesc;
	dma_addr_t atok;

	/* Actual command descriptor ring */
	cdr->offset = priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	/* base_end marks the LAST descriptor slot, not one-past-the-end */
	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	cdr->read = cdr->base;

	/* Command descriptor shadow ring for storing additional token data */
	cdr->shoffset = priv->config.cdsh_offset;
	cdr->shbase = dmam_alloc_coherent(priv->dev,
					  cdr->shoffset *
					  EIP197_DEFAULT_RING_SIZE,
					  &cdr->shbase_dma, GFP_KERNEL);
	if (!cdr->shbase)
		return -ENOMEM;
	cdr->shwrite = cdr->shbase;
	cdr->shbase_end = cdr->shbase + cdr->shoffset *
			  (EIP197_DEFAULT_RING_SIZE - 1);

	/*
	 * Populate command descriptors with physical pointers to shadow descs.
	 * Note that we only need to do this once if we don't overwrite them.
	 */
	cdesc = cdr->base;
	atok = cdr->shbase_dma;
	for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
		cdesc->atok_lo = lower_32_bits(atok);
		cdesc->atok_hi = upper_32_bits(atok);
		/* Descriptor slots are cd_offset bytes apart */
		cdesc = (void *)cdesc + cdr->offset;
		atok += cdr->shoffset;
	}

	rdr->offset = priv->config.rd_offset;
	/* Use shoffset for result token offset here */
	rdr->shoffset = priv->config.res_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	rdr->read = rdr->base;

	return 0;
}
71
72inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
73{
74 return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
75}
76
/*
 * safexcel_ring_next_cwptr - claim the next command descriptor slot
 * @priv:   driver private data (unused here, kept for interface symmetry)
 * @ring:   command descriptor ring
 * @first:  true for the first descriptor of a request
 * @atoken: out: shadow-ring slot holding this descriptor's token data
 *
 * Returns the current write slot and advances both the command ring and
 * its shadow token ring write pointers in lock-step, wrapping at the last
 * slot. Returns ERR_PTR(-ENOMEM) if the ring is full.
 *
 * Note *atoken is written BEFORE the full-ring check, so it may be set
 * even on failure; callers must check the returned pointer first.
 */
static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      bool first,
				      struct safexcel_token **atoken)
{
	void *ptr = ring->write;

	if (first)
		*atoken = ring->shwrite;

	/*
	 * Ring is full when write sits one slot behind read, either
	 * directly or across the wrap-around boundary.
	 */
	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	/* Advance both write pointers, wrapping at the last slot */
	if (ring->write == ring->base_end) {
		ring->write = ring->base;
		ring->shwrite = ring->shbase;
	} else {
		ring->write += ring->offset;
		ring->shwrite += ring->shoffset;
	}

	return ptr;
}
101
/*
 * safexcel_ring_next_rwptr - claim the next result descriptor slot
 * @priv:   driver private data (unused here, kept for interface symmetry)
 * @ring:   result descriptor ring
 * @rtoken: out: result token embedded at offset shoffset in the slot
 *
 * Returns the current write slot and advances the result ring write
 * pointer, wrapping at the last slot. Returns ERR_PTR(-ENOMEM) if the
 * ring is full. As with the command ring variant, *rtoken is written
 * before the full-ring check; check the returned pointer first.
 */
static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      struct result_data_desc **rtoken)
{
	void *ptr = ring->write;

	/* Result token at relative offset shoffset */
	*rtoken = ring->write + ring->shoffset;

	/*
	 * Ring is full when write sits one slot behind read, either
	 * directly or across the wrap-around boundary.
	 */
	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	/* Advance the write pointer, wrapping at the last slot */
	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}
122
123void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
Ofer Heifetz18e0e952018-06-28 17:15:36 +0200124 struct safexcel_desc_ring *ring)
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200125{
126 void *ptr = ring->read;
127
Ofer Heifetz9744fec2018-06-28 17:21:57 +0200128 if (ring->write == ring->read)
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200129 return ERR_PTR(-ENOENT);
130
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200131 if (ring->read == ring->base_end)
132 ring->read = ring->base;
Ofer Heifetz9744fec2018-06-28 17:21:57 +0200133 else
134 ring->read += ring->offset;
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200135
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200136 return ptr;
137}
138
Ofer Heifetz9744fec2018-06-28 17:21:57 +0200139inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
140 int ring)
141{
142 struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
143
144 return rdr->read;
145}
146
147inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
148 int ring)
149{
150 struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
151
152 return (rdr->read - rdr->base) / rdr->offset;
153}
154
155inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
156 int ring,
157 struct safexcel_result_desc *rdesc)
158{
159 struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
160
161 return ((void *)rdesc - rdr->base) / rdr->offset;
162}
163
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200164void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
Ofer Heifetz18e0e952018-06-28 17:15:36 +0200165 struct safexcel_desc_ring *ring)
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200166{
Ofer Heifetz9744fec2018-06-28 17:21:57 +0200167 if (ring->write == ring->read)
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200168 return;
169
Pascal van Leeuwen098e51e2019-12-11 17:32:35 +0100170 if (ring->write == ring->base) {
Ofer Heifetz9744fec2018-06-28 17:21:57 +0200171 ring->write = ring->base_end;
Pascal van Leeuwen098e51e2019-12-11 17:32:35 +0100172 ring->shwrite = ring->shbase_end;
173 } else {
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200174 ring->write -= ring->offset;
Pascal van Leeuwen098e51e2019-12-11 17:32:35 +0100175 ring->shwrite -= ring->shoffset;
176 }
Antoine Ténart1b44c5a2017-05-24 16:10:34 +0200177}
178
/*
 * safexcel_add_cdesc - fill in the next command descriptor on a ring
 * @priv:          driver private data
 * @ring_id:       index of the ring to use
 * @first:         this is the first descriptor of the request
 * @last:          this is the last descriptor of the request
 * @data:          DMA address of the input data segment
 * @data_len:      length of this data segment
 * @full_data_len: total length of the whole request (first desc only)
 * @context:       DMA address of the context record (first desc only)
 * @atoken:        out: token area for this descriptor (set when @first)
 *
 * Claims the next command descriptor slot and populates it. Only the
 * first descriptor of a request carries the control data (packet length,
 * options, context pointer). Returns the descriptor or an ERR_PTR
 * (-ENOMEM when the ring is full).
 */
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context,
						 struct safexcel_token **atoken)
{
	struct safexcel_command_desc *cdesc;

	cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
					 first, atoken);
	if (IS_ERR(cdesc))
		return cdesc;

	cdesc->particle_size = data_len;
	cdesc->rsvd0 = 0;
	cdesc->last_seg = last;
	cdesc->first_seg = first;
	cdesc->additional_cdata_size = 0;
	cdesc->rsvd1 = 0;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first) {
		/*
		 * Note that the length here MUST be >0 or else the EIP(1)97
		 * may hang. Newer EIP197 firmware actually incorporates this
		 * fix already, but that doesn't help the EIP97 and we may
		 * also be running older firmware.
		 */
		cdesc->control_data.packet_length = full_data_len ?: 1;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD |
					      EIP197_OPTION_RC_AUTO;
		cdesc->control_data.type = EIP197_TYPE_BCLA;
		cdesc->control_data.context_lo = lower_32_bits(context) |
						 EIP197_CONTEXT_SMALL;
		cdesc->control_data.context_hi = upper_32_bits(context);
	}

	return cdesc;
}
223
/*
 * safexcel_add_rdesc - fill in the next result descriptor on a ring
 * @priv:    driver private data
 * @ring_id: index of the ring to use
 * @first:   this is the first descriptor of the request
 * @last:    this is the last descriptor of the request
 * @data:    DMA address of the output buffer segment
 * @len:     length of the output buffer segment
 *
 * Claims the next result descriptor slot and populates it. The overflow
 * flags and the result token's error code are deliberately pre-set to
 * error values so that a descriptor the hardware never processed is not
 * mistaken for a successful result. Returns the descriptor or an ERR_PTR
 * (-ENOMEM when the ring is full).
 */
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;
	struct result_data_desc *rtoken;

	rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
					 &rtoken);
	if (IS_ERR(rdesc))
		return rdesc;

	rdesc->particle_size = len;
	rdesc->rsvd0 = 0;
	rdesc->descriptor_overflow = 1; /* assume error */
	rdesc->buffer_overflow = 1; /* assume error */
	rdesc->last_seg = last;
	rdesc->first_seg = first;
	rdesc->result_size = EIP197_RD64_RESULT_SIZE;
	rdesc->rsvd1 = 0;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	/* Clear length in result token */
	rtoken->packet_length = 0;
	/* Assume errors - HW will clear if not the case */
	rtoken->error_code = 0x7fff;

	return rdesc;
}