/*
 * Multi buffer SHA1 algorithm Glue Code
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Contact Information:
 *	Tim Chen <tim.c.chen@linux.intel.com>
 *
 * BSD LICENSE
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/list.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <asm/byteorder.h>
#include <linux/hardirq.h>
#include <asm/fpu/api.h>
#include "sha_mb_ctx.h"

#define FLUSH_INTERVAL 1000 /* in usec */

static struct mcryptd_alg_state sha1_mb_alg_state;

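/*
 * Per-transform context of the outer "sha1_mb" ahash: it only carries a
 * handle to the internal mcryptd transform; all hash state lives in the
 * per-request mcryptd context.
 */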
struct sha1_mb_ctx {
	struct mcryptd_ahash *mcryptd_tfm;
};

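/*
 * A sha1_hash_ctx is the shash descriptor context (desc.__ctx) embedded
 * in a mcryptd request context, so two container_of() steps map a
 * context handed back by the job manager to its enclosing request.
 */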
static inline struct mcryptd_hash_request_ctx
		*cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
{
	struct shash_desc *desc;

	desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
	return container_of(desc, struct mcryptd_hash_request_ctx, desc);
}

static inline struct ahash_request
		*cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
{
	return container_of((void *) ctx, struct ahash_request, __ctx);
}

static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
			 struct shash_desc *desc)
{
	rctx->flag = HASH_UPDATE;
}

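/* Bound to the AVX2 job manager routines in sha1_mb_mod_init(). */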
static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
			(struct sha1_mb_mgr *state, struct job_sha1 *job);
static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
			(struct sha1_mb_mgr *state);
static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
			(struct sha1_mb_mgr *state);

static inline void sha1_init_digest(uint32_t *digest)
{
	static const uint32_t initial_digest[SHA1_DIGEST_LENGTH] = {SHA1_H0,
		SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 };

	memcpy(digest, initial_digest, sizeof(initial_digest));
}

static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
				uint32_t total_len)
{
	uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);

	memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
	padblock[i] = 0x80;

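	/*
	 * Advance i to the end of the padding: the 0x80 byte, the zero
	 * fill and the SHA1_PADLENGTHFIELD_SIZE length field must end
	 * exactly on a block boundary. With an 8-byte length field,
	 * total_len == 0 gives i == 64 (one pad block), while
	 * total_len % 64 == 56 leaves no room for the length field and
	 * gives i == 128 (two blocks).
	 */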
	i += ((SHA1_BLOCK_SIZE - 1) &
	      (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
	     + 1 + SHA1_PADLENGTHFIELD_SIZE;

#if SHA1_PADLENGTHFIELD_SIZE == 16
	*((uint64_t *) &padblock[i - 16]) = 0;
#endif

	*((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);

	/* Number of extra blocks to hash */
	return i >> SHA1_LOG2_BLOCK_SIZE;
}

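/*
 * Drive a context forward after the job manager hands it back: feed in
 * any whole blocks still pending in the user's buffer, submit the final
 * padded block(s) once HASH_CTX_STS_LAST is set, and return the context
 * only when it is COMPLETE or needs more user data. A NULL return means
 * no context is ready yet.
 */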
static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
						   struct sha1_hash_ctx *ctx)
{
	while (ctx) {
		if (ctx->status & HASH_CTX_STS_COMPLETE) {
			/* Clear PROCESSING bit */
			ctx->status = HASH_CTX_STS_COMPLETE;
			return ctx;
		}

		/*
		 * If the extra blocks are empty, begin hashing what remains
		 * in the user's buffer.
		 */
		if (ctx->partial_block_buffer_length == 0 &&
		    ctx->incoming_buffer_length) {

			const void *buffer = ctx->incoming_buffer;
			uint32_t len = ctx->incoming_buffer_length;
			uint32_t copy_len;

			/*
			 * Only entire blocks can be hashed.
			 * Copy remainder to extra blocks buffer.
			 */
			copy_len = len & (SHA1_BLOCK_SIZE-1);

			if (copy_len) {
				len -= copy_len;
				memcpy(ctx->partial_block_buffer,
				       ((const char *) buffer + len),
				       copy_len);
				ctx->partial_block_buffer_length = copy_len;
			}

			ctx->incoming_buffer_length = 0;

			/* len should be a multiple of the block size now */
			assert((len % SHA1_BLOCK_SIZE) == 0);

			/* Set len to the number of blocks to be hashed */
			len >>= SHA1_LOG2_BLOCK_SIZE;

			if (len) {

				ctx->job.buffer = (uint8_t *) buffer;
				ctx->job.len = len;
				ctx = (struct sha1_hash_ctx *)
					sha1_job_mgr_submit(&mgr->mgr,
							    &ctx->job);
				continue;
			}
		}

		/*
		 * If the extra blocks are not empty, then we are
		 * either on the last block(s) or we need more
		 * user input before continuing.
		 */
		if (ctx->status & HASH_CTX_STS_LAST) {

			uint8_t *buf = ctx->partial_block_buffer;
			uint32_t n_extra_blocks =
					sha1_pad(buf, ctx->total_length);

			ctx->status = (HASH_CTX_STS_PROCESSING |
				       HASH_CTX_STS_COMPLETE);
			ctx->job.buffer = buf;
			ctx->job.len = (uint32_t) n_extra_blocks;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
			continue;
		}

		ctx->status = HASH_CTX_STS_IDLE;
		return ctx;
	}

	return NULL;
}


static struct sha1_hash_ctx *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
{
	/*
	 * If get_comp_job returns NULL, there are no jobs complete.
	 * If get_comp_job returns a job, verify that it is safe to return
	 * to the user. If it is not ready, resubmit the job to finish
	 * processing.
	 * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
	 * returned. Otherwise, all jobs currently being managed by the
	 * hash_ctx_mgr still need processing.
	 */
	struct sha1_hash_ctx *ctx;

	ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
	return sha1_ctx_mgr_resubmit(mgr, ctx);
}

static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
{
	sha1_job_mgr_init(&mgr->mgr);
}

static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
						 struct sha1_hash_ctx *ctx,
						 const void *buffer,
						 uint32_t len,
						 int flags)
{
	if (flags & (~HASH_ENTIRE)) {
		/*
		 * User should not pass anything other than FIRST, UPDATE, or
		 * LAST
		 */
		ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
		return ctx;
	}

	if (ctx->status & HASH_CTX_STS_PROCESSING) {
		/* Cannot submit to a currently processing job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
		return ctx;
	}

	if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) {
		/* Cannot update a finished job. */
		ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
		return ctx;
	}

	if (flags & HASH_FIRST) {
		/* Init digest */
		sha1_init_digest(ctx->job.result_digest);

		/* Reset byte counter */
		ctx->total_length = 0;

		/* Clear extra blocks */
		ctx->partial_block_buffer_length = 0;
	}

	/*
	 * If we made it here, there were no errors during this call to
	 * submit.
	 */
	ctx->error = HASH_CTX_ERROR_NONE;

	/* Store buffer ptr info from user */
	ctx->incoming_buffer = buffer;
	ctx->incoming_buffer_length = len;

	/*
	 * Store the user's request flags and mark this ctx as currently
	 * being processed.
	 */
	ctx->status = (flags & HASH_LAST) ?
			(HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
			HASH_CTX_STS_PROCESSING;

	/* Advance byte counter */
	ctx->total_length += len;

	/*
	 * If there is anything currently buffered in the extra blocks,
	 * append to it until it contains a whole block.
	 * Or if the user's buffer contains less than a whole block,
	 * append as much as possible to the extra block.
	 */
	if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
		/*
		 * Compute how many bytes to copy from user buffer into
		 * extra block
		 */
		uint32_t copy_len = SHA1_BLOCK_SIZE -
					ctx->partial_block_buffer_length;
		if (len < copy_len)
			copy_len = len;

		if (copy_len) {
			/* Copy and update relevant pointers and counters */
			memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
			       buffer, copy_len);

			ctx->partial_block_buffer_length += copy_len;
			ctx->incoming_buffer = (const void *)
					((const char *)buffer + copy_len);
			ctx->incoming_buffer_length = len - copy_len;
		}

		/* The extra block should never contain more than 1 block */
		assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);

		/*
		 * If the extra block buffer contains exactly 1 block, it can
		 * be hashed.
		 */
		if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
			ctx->partial_block_buffer_length = 0;

			ctx->job.buffer = ctx->partial_block_buffer;
			ctx->job.len = 1;
			ctx = (struct sha1_hash_ctx *)
				sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
		}
	}

	return sha1_ctx_mgr_resubmit(mgr, ctx);
}


static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
{
	struct sha1_hash_ctx *ctx;

	while (1) {
		ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);

		/* If flush returned NULL, there are no more jobs in flight. */
		if (!ctx)
			return NULL;

		/*
		 * If flush returned a job, resubmit the job to finish
		 * processing.
		 */
		ctx = sha1_ctx_mgr_resubmit(mgr, ctx);

		/*
		 * If sha1_ctx_mgr_resubmit returned a job, it is ready to
		 * be returned. Otherwise, all jobs currently being managed
		 * by the sha1_ctx_mgr still need processing. Loop.
		 */
		if (ctx)
			return ctx;
	}
}

static int sha1_mb_init(struct shash_desc *desc)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	hash_ctx_init(sctx);
	sctx->job.result_digest[0] = SHA1_H0;
	sctx->job.result_digest[1] = SHA1_H1;
	sctx->job.result_digest[2] = SHA1_H2;
	sctx->job.result_digest[3] = SHA1_H3;
	sctx->job.result_digest[4] = SHA1_H4;
	sctx->total_length = 0;
	sctx->partial_block_buffer_length = 0;
	sctx->status = HASH_CTX_STS_IDLE;

	return 0;
}

static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
{
	int i;
	struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
	__be32 *dst = (__be32 *) rctx->out;

	for (i = 0; i < 5; ++i)
		dst[i] = cpu_to_be32(sctx->job.result_digest[i]);

	return 0;
}

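/*
 * Walk the remaining scatterlist data of a request whose current job
 * has completed, resubmitting each chunk to the manager. With flush
 * set (the flusher path), stalled lanes are forced out instead of
 * waiting for further submissions to fill them.
 */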
static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
			   struct mcryptd_alg_cstate *cstate, bool flush)
{
	int flag = HASH_UPDATE;
	int nbytes, err = 0;
	struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
	struct sha1_hash_ctx *sha_ctx;

	/* more work? */
	while (!(rctx->flag & HASH_DONE)) {
		nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
		if (nbytes < 0) {
			err = nbytes;
			goto out;
		}
		/* check if the walk is done */
		if (crypto_ahash_walk_last(&rctx->walk)) {
			rctx->flag |= HASH_DONE;
			if (rctx->flag & HASH_FINAL)
				flag |= HASH_LAST;

		}
		sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(&rctx->desc);
		kernel_fpu_begin();
		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
					      rctx->walk.data, nbytes, flag);
		if (!sha_ctx) {
			if (flush)
				sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
		}
		kernel_fpu_end();
		if (sha_ctx)
			rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		else {
			rctx = NULL;
			goto out;
		}
	}

	/* copy the results */
	if (rctx->flag & HASH_FINAL)
		sha1_mb_set_results(rctx);

out:
	*ret_rctx = rctx;
	return err;
}

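/*
 * Complete one request: remove it from the per-cpu work list, run its
 * completion callback, then drain any other jobs the manager has
 * already finished so their completions run from the same context.
 */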
static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
			    struct mcryptd_alg_cstate *cstate,
			    int err)
{
	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	struct mcryptd_hash_request_ctx *req_ctx;
	int ret;

	/* remove from work list */
	spin_lock(&cstate->work_lock);
	list_del(&rctx->waiter);
	spin_unlock(&cstate->work_lock);

	if (irqs_disabled())
		rctx->complete(&req->base, err);
	else {
		local_bh_disable();
		rctx->complete(&req->base, err);
		local_bh_enable();
	}

	/* check to see if there are other jobs that are done */
	sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	while (sha_ctx) {
		req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		ret = sha_finish_walk(&req_ctx, cstate, false);
		if (req_ctx) {
			spin_lock(&cstate->work_lock);
			list_del(&req_ctx->waiter);
			spin_unlock(&cstate->work_lock);

			req = cast_mcryptd_ctx_to_req(req_ctx);
			if (irqs_disabled())
				req_ctx->complete(&req->base, ret);
			else {
				local_bh_disable();
				req_ctx->complete(&req->base, ret);
				local_bh_enable();
			}
		}
		sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
	}

	return 0;
}

static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
			     struct mcryptd_alg_cstate *cstate)
{
	unsigned long next_flush;
	unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);

	/* initialize tag */
	rctx->tag.arrival = jiffies;	/* tag the arrival time */
	rctx->tag.seq_num = cstate->next_seq_num++;
	next_flush = rctx->tag.arrival + delay;
	rctx->tag.expire = next_flush;

	spin_lock(&cstate->work_lock);
	list_add_tail(&rctx->waiter, &cstate->work_list);
	spin_unlock(&cstate->work_lock);

	mcryptd_arm_flusher(cstate, delay);
}

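/*
 * Inner shash update: start the scatterlist walk, queue the request on
 * the per-cpu work list and submit the first chunk to the job manager.
 * A NULL return from submit means the lanes are still filling, so the
 * request stays in flight and -EINPROGRESS is returned.
 */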
static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk))
		rctx->flag |= HASH_DONE;

	/* submit */
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, HASH_UPDATE);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);

	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
			 unsigned int len, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
	struct sha1_hash_ctx *sha_ctx;
	int ret = 0, flag = HASH_UPDATE, nbytes;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

	if (nbytes < 0) {
		ret = nbytes;
		goto done;
	}

	if (crypto_ahash_walk_last(&rctx->walk)) {
		rctx->flag |= HASH_DONE;
		flag = HASH_LAST;
	}
	rctx->out = out;

	/* submit */
	rctx->flag |= HASH_FINAL;
	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	sha1_mb_add_list(rctx, cstate);

	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
				      nbytes, flag);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_final(struct shash_desc *desc, u8 *out)
{
	struct mcryptd_hash_request_ctx *rctx =
		container_of(desc, struct mcryptd_hash_request_ctx, desc);
	struct mcryptd_alg_cstate *cstate =
		this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

	struct sha1_hash_ctx *sha_ctx;
	int ret = 0;
	u8 data;

	/* sanity check */
	if (rctx->tag.cpu != smp_processor_id()) {
		pr_err("mcryptd error: cpu clash\n");
		goto done;
	}

	/* need to init context */
	req_ctx_init(rctx, desc);

	rctx->out = out;
	rctx->flag |= HASH_DONE | HASH_FINAL;

	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
	/* flag HASH_FINAL and 0 data size */
	sha1_mb_add_list(rctx, cstate);
	kernel_fpu_begin();
	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
				      HASH_LAST);
	kernel_fpu_end();

	/* check if anything is returned */
	if (!sha_ctx)
		return -EINPROGRESS;

	if (sha_ctx->error) {
		ret = sha_ctx->error;
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		goto done;
	}

	rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
	ret = sha_finish_walk(&rctx, cstate, false);
	if (!rctx)
		return -EINPROGRESS;
done:
	sha_complete_job(rctx, cstate, ret);
	return ret;
}

static int sha1_mb_export(struct shash_desc *desc, void *out)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_mb_import(struct shash_desc *desc, const void *in)
{
	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static struct shash_alg sha1_mb_shash_alg = {
	.digestsize = SHA1_DIGEST_SIZE,
	.init = sha1_mb_init,
	.update = sha1_mb_update,
	.final = sha1_mb_final,
	.finup = sha1_mb_finup,
	.export = sha1_mb_export,
	.import = sha1_mb_import,
	.descsize = sizeof(struct sha1_hash_ctx),
	.statesize = sizeof(struct sha1_hash_ctx),
	.base = {
		.cra_name = "__sha1-mb",
		.cra_driver_name = "__intel_sha1-mb",
		.cra_priority = 100,
		/*
		 * use ASYNC flag as some buffers in the multi-buffer
		 * algorithm may not have completed before the hashing
		 * thread sleeps
		 */
		.cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_INTERNAL,
		.cra_blocksize = SHA1_BLOCK_SIZE,
		.cra_module = THIS_MODULE,
		.cra_list = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
	}
};

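/*
 * The exported "sha1_mb" ahash is a thin shim: each operation copies
 * the caller's request, retargets it at the internal mcryptd transform
 * and lets mcryptd queue it for the multi-buffer shash above.
 */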
static int sha1_mb_async_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_init(mcryptd_req);
}

static int sha1_mb_async_update(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_update(mcryptd_req);
}

static int sha1_mb_async_finup(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_finup(mcryptd_req);
}

static int sha1_mb_async_final(struct ahash_request *req)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);

	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_final(mcryptd_req);
}

static int sha1_mb_async_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_digest(mcryptd_req);
}

static int sha1_mb_async_export(struct ahash_request *req, void *out)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	return crypto_ahash_export(mcryptd_req, out);
}

static int sha1_mb_async_import(struct ahash_request *req, const void *in)
{
	struct ahash_request *mcryptd_req = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
	struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
	struct mcryptd_hash_request_ctx *rctx;
	struct shash_desc *desc;

	memcpy(mcryptd_req, req, sizeof(*req));
	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
	rctx = ahash_request_ctx(mcryptd_req);
	desc = &rctx->desc;
	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_import(mcryptd_req, in);
}

static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_ahash *mcryptd_tfm;
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
	struct mcryptd_hash_ctx *mctx;

	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
					  CRYPTO_ALG_INTERNAL,
					  CRYPTO_ALG_INTERNAL);
	if (IS_ERR(mcryptd_tfm))
		return PTR_ERR(mcryptd_tfm);
	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
	mctx->alg_state = &sha1_mb_alg_state;
	ctx->mcryptd_tfm = mcryptd_tfm;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct ahash_request) +
				 crypto_ahash_reqsize(&mcryptd_tfm->base));

	return 0;
}

static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
{
	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);

	mcryptd_free_ahash(ctx->mcryptd_tfm);
}

static struct ahash_alg sha1_mb_async_alg = {
	.init = sha1_mb_async_init,
	.update = sha1_mb_async_update,
	.final = sha1_mb_async_final,
	.finup = sha1_mb_async_finup,
	.digest = sha1_mb_async_digest,
	.export = sha1_mb_async_export,
	.import = sha1_mb_async_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_hash_ctx),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "sha1_mb",
			.cra_priority = 200,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_type = &crypto_ahash_type,
			.cra_module = THIS_MODULE,
			.cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
			.cra_init = sha1_mb_async_init_tfm,
			.cra_exit = sha1_mb_async_exit_tfm,
			.cra_ctxsize = sizeof(struct sha1_mb_ctx),
			.cra_alignmask = 0,
		},
	},
};

static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
{
	struct mcryptd_hash_request_ctx *rctx;
	unsigned long cur_time;
	unsigned long next_flush = 0;
	struct sha1_hash_ctx *sha_ctx;

	cur_time = jiffies;

	while (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		if (time_before(cur_time, rctx->tag.expire))
			break;
		kernel_fpu_begin();
		sha_ctx = (struct sha1_hash_ctx *)
				sha1_ctx_mgr_flush(cstate->mgr);
		kernel_fpu_end();
		if (!sha_ctx) {
			pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
			break;
		}
		rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
		sha_finish_walk(&rctx, cstate, true);
		sha_complete_job(rctx, cstate, 0);
	}

	if (!list_empty(&cstate->work_list)) {
		rctx = list_entry(cstate->work_list.next,
				struct mcryptd_hash_request_ctx, waiter);
		/* get the hash context and then flush time */
		next_flush = rctx->tag.expire;
		mcryptd_arm_flusher(cstate, get_delay(next_flush));
	}
	return next_flush;
}

static int __init sha1_mb_mod_init(void)
{
	int cpu;
	int err;
	struct mcryptd_alg_cstate *cpu_state;

	/* check for dependent cpu features */
	if (!boot_cpu_has(X86_FEATURE_AVX2) ||
	    !boot_cpu_has(X86_FEATURE_BMI2))
		return -ENODEV;

	/* initialize multibuffer structures */
	sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);

	sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
	sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
	sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
	sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;

	if (!sha1_mb_alg_state.alg_cstate)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		cpu_state->next_flush = 0;
		cpu_state->next_seq_num = 0;
		cpu_state->flusher_engaged = false;
		INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
		cpu_state->cpu = cpu;
		cpu_state->alg_state = &sha1_mb_alg_state;
		cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
					GFP_KERNEL);
		if (!cpu_state->mgr)
			goto err2;
		sha1_ctx_mgr_init(cpu_state->mgr);
		INIT_LIST_HEAD(&cpu_state->work_list);
		spin_lock_init(&cpu_state->work_lock);
	}
	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

	err = crypto_register_shash(&sha1_mb_shash_alg);
	if (err)
		goto err2;
	err = crypto_register_ahash(&sha1_mb_async_alg);
	if (err)
		goto err1;

	return 0;
err1:
	crypto_unregister_shash(&sha1_mb_shash_alg);
err2:
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
	return -ENODEV;
}

static void __exit sha1_mb_mod_fini(void)
{
	int cpu;
	struct mcryptd_alg_cstate *cpu_state;

	crypto_unregister_ahash(&sha1_mb_async_alg);
	crypto_unregister_shash(&sha1_mb_shash_alg);
	for_each_possible_cpu(cpu) {
		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
		kfree(cpu_state->mgr);
	}
	free_percpu(sha1_mb_alg_state.alg_cstate);
}

module_init(sha1_mb_mod_init);
module_exit(sha1_mb_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");

MODULE_ALIAS_CRYPTO("sha1");
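
/*
 * Illustrative use from other kernel code (a sketch, not part of this
 * file): once this module is loaded, its cra_priority of 200 lets it
 * win selection through the ordinary ahash API, e.g.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *
 * followed by the usual ahash_request_alloc()/crypto_ahash_digest()
 * calls; completions arrive asynchronously via the request callback.
 */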